ngram
listlengths
0
67.8k
[ "in parsed_fields: form_index = None if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None)", "Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create Form Data Entries from", "containing HTML markup. :param parser: A string containing a valid BeautifulSoup parsing library", "\"enctype\": form_data.enctype = val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] =", "markup: str, parser: str = None) -> List[FormData]: \"\"\" Convert a HTML page", "enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to", "page. \"\"\" def __init__(self, markup: str = None, parser: str = None): \"\"\"", "parsers. :returns: A collection of Form Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\",", "self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create Form", "form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\",", "parsed_fields: form_index = None if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if", "parsed_form: Tag) -> FormData: \"\"\" Create Form Data from parsed form node object.", "Create Form Data Entries from pasred form input element. 
:param parsed_form_field: A BeautifulSoup", "parsed_field.find_parent(\"form\") if parent_form is not None: form_index = parsed_forms.index(parsed_form) if form_index is not", "== \"action\": form_data.action = val.strip() elif match_key == \"method\": form_data.method = val.strip().upper() elif", "None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\"", "is not None: self.parse(markup, parser) def parse(self, markup: str, parser: str = None)", "form_data.name = val elif match_key == \"action\": form_data.action = val.strip() elif match_key ==", "form_index is None: parent_form = parsed_field.find_parent(\"form\") if parent_form is not None: form_index =", "\"name\": form_data.name = val elif match_key == \"action\": form_data.action = val.strip() elif match_key", "input field. :param field_parsers: A collection of HTML input element parsers. :returns: A", "form_index = None if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index", "or specify their form owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields:", "is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form: Tag) ->", ":returns: A collection of Form Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None)", "to select a BeutifulSoup Parser. :returns: A collection of ForData objects. The same", "parsed form node object. :param parsed_form: A BeautifulSoup object containing a form. 
:returns:", "Convert a HTML page into Form Data objects :param markup: A string containing", "match_key == \"action\": form_data.action = val.strip() elif match_key == \"method\": form_data.method = val.strip().upper()", "not None: form_index = parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers))", "form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser)", "collection of Form Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser", "containing form node, or specify their form owner by attribute. 
# https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for", "None: self.parse(markup, parser) def parse(self, markup: str, parser: str = None) -> List[FormData]:", "bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\",", "parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map = {}", "form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser =", "into Form Data objects :param markup: A string containing HTML markup. :param parser:", "BeautifulSoup object containing a form. :returns: A FormData object \"\"\" form_data = FormData()", "List from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import", "__init__(self, markup: str = None, parser: str = None): \"\"\" :param markup: A", "input element parsers. 
:returns: A collection of Form Data Entry objects \"\"\" field_type", "if parser is None: parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(),", "Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if", "parser: str = None): \"\"\" :param markup: A string containing HTML markup. :param", "objects are stored within the object. \"\"\" if parser is None: parser =", "markup. :param parser: A string containing a valid BeautifulSoup parsing library name. \"\"\"", "import re from typing import List from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data", "== \"name\": form_data.name = val elif match_key == \"action\": form_data.action = val.strip() elif", "A string containing a valid BeautifulSoup parsing library name. \"\"\" self.forms = []", "HTML page. \"\"\" def __init__(self, markup: str = None, parser: str = None):", "FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract HTML forms", ":param parser: A string containing a valid BeautifulSoup parsing library name. \"\"\" self.forms", "HTML forms from a HTML page. \"\"\" def __init__(self, markup: str = None,", "if markup is not None: self.parse(markup, parser) def parse(self, markup: str, parser: str", "-> FormData: \"\"\" Create Form Data from parsed form node object. :param parsed_form:", "library name. \"\"\" self.forms = [] if markup is not None: self.parse(markup, parser)", "containing HTML markup. 
:param parser: A string property to select a BeutifulSoup Parser.", "\"\"\" form_data = FormData() for key, val in parsed_form.attrs.items(): match_key = key.lower() if", "= BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", ))", "import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers", "val.strip().upper() elif match_key == \"enctype\": form_data.enctype = val.strip() return form_data def _create_form_data_field(self, parsed_form_field:", "from typing import List from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData", "match_key == \"enctype\": form_data.enctype = val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers:", "select a BeutifulSoup Parser. :returns: A collection of ForData objects. 
The same objects", "for parser in field_parsers: if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return", "form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create", "field_parsers: if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif parser.suitable(parsed_form_field.name,", "self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create", "parser: str = None) -> List[FormData]: \"\"\" Convert a HTML page into Form", "form_data = FormData() for key, val in parsed_form.attrs.items(): match_key = key.lower() if match_key", "form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields", "== \"enctype\": form_data.enctype = val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser]", "Entries from pasred form input element. :param parsed_form_field: A BeautifulSoup object containing an", "containing a form. 
:returns: A FormData object \"\"\" form_data = FormData() for key,", "form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms =", "index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest containing form node, or specify", "parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields", "self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest containing form node, or specify their", "val in parsed_form.attrs.items(): match_key = key.lower() if match_key == \"name\": form_data.name = val", "\"\"\" Convert a HTML page into Form Data objects :param markup: A string", "FormData() for key, val in parsed_form.attrs.items(): match_key = key.lower() if match_key == \"name\":", "None: parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(),", "str, parser: str = None) -> List[FormData]: \"\"\" Convert a HTML page into", "match_key = key.lower() if match_key == \"name\": form_data.name = val elif match_key ==", "if parent_form is not None: form_index = parsed_forms.index(parsed_form) if form_index is not None:", "is None: parent_form = parsed_field.find_parent(\"form\") if parent_form is not None: form_index = parsed_forms.index(parsed_form)", "None and 
parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif parser.suitable(parsed_form_field.name, None): return parser.parse(parsed_form_field) return []", "to the nearest containing form node, or specify their form owner by attribute.", "return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\"", "A FormData object \"\"\" form_data = FormData() for key, val in parsed_form.attrs.items(): match_key", "form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form = parsed_field.find_parent(\"form\") if parent_form is not", "for parsed_field in parsed_fields: form_index = None if \"form\" in parsed_field.attrs: form_index =", "def __init__(self, markup: str = None, parser: str = None): \"\"\" :param markup:", "\"\"\" def __init__(self, markup: str = None, parser: str = None): \"\"\" :param", "input element. :param parsed_form_field: A BeautifulSoup object containing an input field. :param field_parsers:", "html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract", "Data from parsed form node object. :param parsed_form: A BeautifulSoup object containing a", "HTML markup. :param parser: A string containing a valid BeautifulSoup parsing library name.", "node object. :param parsed_form: A BeautifulSoup object containing a form. :returns: A FormData", "object containing an input field. :param field_parsers: A collection of HTML input element", "in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form = parsed_field.find_parent(\"form\")", "element. :param parsed_form_field: A BeautifulSoup object containing an input field. 
:param field_parsers: A", "not None: self.parse(markup, parser) def parse(self, markup: str, parser: str = None) ->", "if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form:", "= None) -> List[FormDataEntry]: \"\"\" Create Form Data Entries from pasred form input", "A collection of ForData objects. The same objects are stored within the object.", "containing a valid BeautifulSoup parsing library name. \"\"\" self.forms = [] if markup", ":param markup: A string containing HTML markup. :param parser: A string property to", "owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index = None if", "parser in field_parsers: if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field)", "\"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(),", "bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry from", "parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create Form Data Entries", "string containing HTML markup. :param parser: A string containing a valid BeautifulSoup parsing", "property to select a BeutifulSoup Parser. 
:returns: A collection of ForData objects. The", "form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\",", "field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif parser.suitable(parsed_form_field.name, None): return", "ForData objects. The same objects are stored within the object. \"\"\" if parser", "List[FormDataEntry]: \"\"\" Create Form Data Entries from pasred form input element. :param parsed_form_field:", "\"textarea\", )) form_id_map = {} for index, parsed_form in enumerate(parsed_forms): if \"id\" in", "= None) -> List[FormData]: \"\"\" Convert a HTML page into Form Data objects", "\"\"\" self.forms = [] if markup is not None: self.parse(markup, parser) def parse(self,", "their form owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index =", "= val.strip() elif match_key == \"method\": form_data.method = val.strip().upper() elif match_key == \"enctype\":", "None: form_index = parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return", "collection of ForData objects. The same objects are stored within the object. \"\"\"", "val elif match_key == \"action\": form_data.action = val.strip() elif match_key == \"method\": form_data.method", "class HtmlFormParser: \"\"\" Parse and extract HTML forms from a HTML page. 
\"\"\"", "form_id_map = {} for index, parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]]", "bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map = {} for index,", "for index, parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form))", "Form Data from parsed form node object. :param parsed_form: A BeautifulSoup object containing", "parsers)) return self.forms def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create Form Data", "key.lower() if match_key == \"name\": form_data.name = val elif match_key == \"action\": form_data.action", "match_key == \"method\": form_data.method = val.strip().upper() elif match_key == \"enctype\": form_data.enctype = val.strip()", "from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and", "elif match_key == \"method\": form_data.method = val.strip().upper() elif match_key == \"enctype\": form_data.enctype =", "None) -> List[FormDataEntry]: \"\"\" Create Form Data Entries from pasred form input element.", "stored within the object. \"\"\" if parser is None: parser = \"html5lib\" parsers", "parsed_form_field: A BeautifulSoup object containing an input field. :param field_parsers: A collection of", "HtmlFormParser: \"\"\" Parse and extract HTML forms from a HTML page. \"\"\" def", "nearest containing form node, or specify their form owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms", "parsing library name. 
\"\"\" self.forms = [] if markup is not None: self.parse(markup,", "typing import List from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from", "form_data.enctype = val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None)", "parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map =", "from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry", "List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create Form Data Entries from pasred form", "in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest containing", "page into Form Data objects :param markup: A string containing HTML markup. :param", "val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]:", "https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index = None if \"form\" in parsed_field.attrs: form_index", "\"\"\" :param markup: A string containing HTML markup. :param parser: A string containing", "for key, val in parsed_form.attrs.items(): match_key = key.lower() if match_key == \"name\": form_data.name", "a HTML page into Form Data objects :param markup: A string containing HTML", "form_data.method = val.strip().upper() elif match_key == \"enctype\": form_data.enctype = val.strip() return form_data def", "object containing a form. 
:returns: A FormData object \"\"\" form_data = FormData() for", "from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract HTML forms from", "HTML input element parsers. :returns: A collection of Form Data Entry objects \"\"\"", "= parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if field_type is not None and", "str = None) -> List[FormData]: \"\"\" Convert a HTML page into Form Data", "= val.strip().upper() elif match_key == \"enctype\": form_data.enctype = val.strip() return form_data def _create_form_data_field(self,", "key, val in parsed_form.attrs.items(): match_key = key.lower() if match_key == \"name\": form_data.name =", "parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest containing form", "is not None: form_index = parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field,", "associate to the nearest containing form node, or specify their form owner by", "html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser:", "the object. \"\"\" if parser is None: parser = \"html5lib\" parsers = [", "object. \"\"\" if parser is None: parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(),", "form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form = parsed_field.find_parent(\"form\") if parent_form", "def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create Form Data from parsed form", "\"\"\" Create Form Data Entries from pasred form input element. 
:param parsed_form_field: A", "parsed_field in parsed_fields: form_index = None if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"],", "if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the", "= val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) ->", "string containing HTML markup. :param parser: A string property to select a BeutifulSoup", "parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(),", "parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form = parsed_field.find_parent(\"form\") if", "pasred form input element. :param parsed_form_field: A BeautifulSoup object containing an input field.", "str = None): \"\"\" :param markup: A string containing HTML markup. :param parser:", "field. :param field_parsers: A collection of HTML input element parsers. :returns: A collection", "object. :param parsed_form: A BeautifulSoup object containing a form. 
:returns: A FormData object", "index, parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) #", "is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif parser.suitable(parsed_form_field.name, None): return parser.parse(parsed_form_field)", "if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form", ":param parsed_form: A BeautifulSoup object containing a form. :returns: A FormData object \"\"\"", "None, parser: str = None): \"\"\" :param markup: A string containing HTML markup.", "BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map", "\"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest", "Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if field_type", ":param field_parsers: A collection of HTML input element parsers. :returns: A collection of", "html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract HTML forms from a", "not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif parser.suitable(parsed_form_field.name, None): return parser.parse(parsed_form_field) return", "objects :param markup: A string containing HTML markup. :param parser: A string property", "from a HTML page. \"\"\" def __init__(self, markup: str = None, parser: str", "within the object. \"\"\" if parser is None: parser = \"html5lib\" parsers =", "a valid BeautifulSoup parsing library name. 
\"\"\" self.forms = [] if markup is", "BeutifulSoup Parser. :returns: A collection of ForData objects. The same objects are stored", "= [] if markup is not None: self.parse(markup, parser) def parse(self, markup: str,", "match_key == \"name\": form_data.name = val elif match_key == \"action\": form_data.action = val.strip()", ")) form_id_map = {} for index, parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs:", "an input field. :param field_parsers: A collection of HTML input element parsers. :returns:", "self.forms def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create Form Data from parsed", "FormData object \"\"\" form_data = FormData() for key, val in parsed_form.attrs.items(): match_key =", "collection of HTML input element parsers. :returns: A collection of Form Data Entry", "None: parent_form = parsed_field.find_parent(\"form\") if parent_form is not None: form_index = parsed_forms.index(parsed_form) if", "form_data.action = val.strip() elif match_key == \"method\": form_data.method = val.strip().upper() elif match_key ==", "form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms", "A BeautifulSoup object containing an input field. 
:param field_parsers: A collection of HTML", "of Form Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser in", "form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\")", "= None, parser: str = None): \"\"\" :param markup: A string containing HTML", "HTML markup. :param parser: A string property to select a BeutifulSoup Parser. :returns:", "if form_index is None: parent_form = parsed_field.find_parent(\"form\") if parent_form is not None: form_index", "return self.forms def _create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create Form Data from", "self.forms = [] if markup is not None: self.parse(markup, parser) def parse(self, markup:", "\"action\": form_data.action = val.strip() elif match_key == \"method\": form_data.method = val.strip().upper() elif match_key", "objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if field_type is", "FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse", "str = None, parser: str = None): \"\"\" :param markup: A string containing", "BeautifulSoup object containing an input field. :param field_parsers: A collection of HTML input", "BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import", "extract HTML forms from a HTML page. 
\"\"\" def __init__(self, markup: str =", "\"\"\" if parser is None: parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(),", "A string property to select a BeutifulSoup Parser. :returns: A collection of ForData", "of ForData objects. The same objects are stored within the object. \"\"\" if", "parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self,", "string property to select a BeutifulSoup Parser. :returns: A collection of ForData objects.", "parser is None: parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(),", "containing an input field. :param field_parsers: A collection of HTML input element parsers.", "-> List[FormDataEntry]: \"\"\" Create Form Data Entries from pasred form input element. :param", "markup. :param parser: A string property to select a BeutifulSoup Parser. :returns: A", "attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index = None if \"form\" in", "parsed_form.attrs.items(): match_key = key.lower() if match_key == \"name\": form_data.name = val elif match_key", "form_index = parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms", "object \"\"\" form_data = FormData() for key, val in parsed_form.attrs.items(): match_key = key.lower()", "markup: A string containing HTML markup. 
:param parser: A string containing a valid", "_create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create Form Data", "parser: A string property to select a BeutifulSoup Parser. :returns: A collection of", "in parsed_form.attrs.items(): match_key = key.lower() if match_key == \"name\": form_data.name = val elif", "\"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if field_type is not", "import List from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry", "form. :returns: A FormData object \"\"\" form_data = FormData() for key, val in", "form input element. :param parsed_form_field: A BeautifulSoup object containing an input field. :param", "form node object. :param parsed_form: A BeautifulSoup object containing a form. :returns: A", "in field_parsers: if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif", "Create Form Data from parsed form node object. :param parsed_form: A BeautifulSoup object", "A collection of HTML input element parsers. 
:returns: A collection of Form Data", "Tag from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser", "<gh_stars>0 import re from typing import List from bs4 import BeautifulSoup, Tag from", "form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields =", "not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form: Tag) -> FormData:", "parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map = {} for index, parsed_form", "Parse and extract HTML forms from a HTML page. \"\"\" def __init__(self, markup:", "form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest containing form node,", "A string containing HTML markup. 
:param parser: A string containing a valid BeautifulSoup", "= [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(),", "= form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form = parsed_field.find_parent(\"form\") if parent_form is", "parser) def parse(self, markup: str, parser: str = None) -> List[FormData]: \"\"\" Convert", "field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create Form Data Entries from pasred", "] bs4_parser = BeautifulSoup(markup, parser) parsed_forms = bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\",", "parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if field_type is not None and parser.suitable(parsed_form_field.name,", "are stored within the object. 
\"\"\" if parser is None: parser = \"html5lib\"", "bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map = {} for index, parsed_form in enumerate(parsed_forms):", "import FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\"", "import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract HTML", "[ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ]", "node, or specify their form owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in", "elif match_key == \"action\": form_data.action = val.strip() elif match_key == \"method\": form_data.method =", "from parsed form node object. :param parsed_form: A BeautifulSoup object containing a form.", "None) for parser in field_parsers: if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()):", "= key.lower() if match_key == \"name\": form_data.name = val elif match_key == \"action\":", "from pasred form input element. 
:param parsed_form_field: A BeautifulSoup object containing an input", "form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract HTML forms from a HTML page.", "self.parse(markup, parser) def parse(self, markup: str, parser: str = None) -> List[FormData]: \"\"\"", "= None): \"\"\" :param markup: A string containing HTML markup. :param parser: A", "form owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index = None", "objects. The same objects are stored within the object. \"\"\" if parser is", "a BeutifulSoup Parser. :returns: A collection of ForData objects. The same objects are", "Fields associate to the nearest containing form node, or specify their form owner", "parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(),", "from html_form_parser.models.form_data import FormData from html_form_parser.models.form_data_entry import FormDataEntry from html_form_parser.parsers import form_data_entry_parser class", "\"input\", \"select\", \"textarea\", )) form_id_map = {} for index, parsed_form in enumerate(parsed_forms): if", "# Fields associate to the nearest containing form node, or specify their form", "= \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), 
form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(),", "[] if markup is not None: self.parse(markup, parser) def parse(self, markup: str, parser:", "a form. :returns: A FormData object \"\"\" form_data = FormData() for key, val", "Form Data objects :param markup: A string containing HTML markup. :param parser: A", "= FormData() for key, val in parsed_form.attrs.items(): match_key = key.lower() if match_key ==", "parser: A string containing a valid BeautifulSoup parsing library name. \"\"\" self.forms =", "name. \"\"\" self.forms = [] if markup is not None: self.parse(markup, parser) def", "= {} for index, parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] =", "None): \"\"\" :param markup: A string containing HTML markup. :param parser: A string", "List[FormData]: \"\"\" Convert a HTML page into Form Data objects :param markup: A", "element parsers. :returns: A collection of Form Data Entry objects \"\"\" field_type =", "\"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None: parent_form =", "import form_data_entry_parser class HtmlFormParser: \"\"\" Parse and extract HTML forms from a HTML", "BeautifulSoup parsing library name. \"\"\" self.forms = [] if markup is not None:", "by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index = None if \"form\"", "Tag) -> FormData: \"\"\" Create Form Data from parsed form node object. :param", "specify their form owner by attribute. 
# https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index", "string containing a valid BeautifulSoup parsing library name. \"\"\" self.forms = [] if", ":param parsed_form_field: A BeautifulSoup object containing an input field. :param field_parsers: A collection", "A BeautifulSoup object containing a form. :returns: A FormData object \"\"\" form_data =", "Form Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers:", "form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser", "of HTML input element parsers. :returns: A collection of Form Data Entry objects", "parent_form is not None: form_index = parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend(", "forms from a HTML page. \"\"\" def __init__(self, markup: str = None, parser:", "is None: parser = \"html5lib\" parsers = [ form_data_entry_parser.SelectableInputFormElementParser(), form_data_entry_parser.ColorInputFormElementParser(), form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(),", "A string containing HTML markup. 
:param parser: A string property to select a", "None) if form_index is None: parent_form = parsed_field.find_parent(\"form\") if parent_form is not None:", "field_parsers: A collection of HTML input element parsers. :returns: A collection of Form", "def parse(self, markup: str, parser: str = None) -> List[FormData]: \"\"\" Convert a", "_create_form_data(self, parsed_form: Tag) -> FormData: \"\"\" Create Form Data from parsed form node", "\"select\", \"textarea\", )) form_id_map = {} for index, parsed_form in enumerate(parsed_forms): if \"id\"", "-> List[FormData]: \"\"\" Convert a HTML page into Form Data objects :param markup:", "in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index self.forms.append(self._create_form_data(parsed_form)) # Fields associate", "if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()): return parser.parse(parsed_form_field) elif parser.suitable(parsed_form_field.name, None):", "parent_form = parsed_field.find_parent(\"form\") if parent_form is not None: form_index = parsed_forms.index(parsed_form) if form_index", "elif match_key == \"enctype\": form_data.enctype = val.strip() return form_data def _create_form_data_field(self, parsed_form_field: Tag,", ":param parser: A string property to select a BeutifulSoup Parser. :returns: A collection", "and extract HTML forms from a HTML page. \"\"\" def __init__(self, markup: str", "re from typing import List from bs4 import BeautifulSoup, Tag from html_form_parser.models.form_data import", "{} for index, parsed_form in enumerate(parsed_forms): if \"id\" in parsed_form.attrs: form_id_map[parsed_form.attrs[\"id\"]] = index", "the nearest containing form node, or specify their form owner by attribute. #", "val.strip() elif match_key == \"method\": form_data.method = val.strip().upper() elif match_key == \"enctype\": form_data.enctype", "Parser. :returns: A collection of ForData objects. 
The same objects are stored within", ":returns: A collection of ForData objects. The same objects are stored within the", "A collection of Form Data Entry objects \"\"\" field_type = parsed_form_field.attrs.get(\"type\", None) for", "= bs4_parser.find_all(\"form\") parsed_fields = bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map = {} for", "= bs4_parser.find_all((\"button\", \"input\", \"select\", \"textarea\", )) form_id_map = {} for index, parsed_form in", "markup: str = None, parser: str = None): \"\"\" :param markup: A string", "Data Entries from pasred form input element. :param parsed_form_field: A BeautifulSoup object containing", "None) -> List[FormData]: \"\"\" Convert a HTML page into Form Data objects :param", "valid BeautifulSoup parsing library name. \"\"\" self.forms = [] if markup is not", "Data objects :param markup: A string containing HTML markup. :param parser: A string", "if match_key == \"name\": form_data.name = val elif match_key == \"action\": form_data.action =", "parse(self, markup: str, parser: str = None) -> List[FormData]: \"\"\" Convert a HTML", "a HTML page. \"\"\" def __init__(self, markup: str = None, parser: str =", ":param markup: A string containing HTML markup. :param parser: A string containing a", "parsed_form: A BeautifulSoup object containing a form. :returns: A FormData object \"\"\" form_data", "def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]: \"\"\" Create Form", "form node, or specify their form owner by attribute. # https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field", "\"method\": form_data.method = val.strip().upper() elif match_key == \"enctype\": form_data.enctype = val.strip() return form_data", "FormData: \"\"\" Create Form Data from parsed form node object. 
:param parsed_form: A", "HTML page into Form Data objects :param markup: A string containing HTML markup.", "= parsed_forms.index(parsed_form) if form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def", "markup: A string containing HTML markup. :param parser: A string property to select", "# https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms for parsed_field in parsed_fields: form_index = None if \"form\" in parsed_field.attrs:", "form_data_entry_parser.RangeInputFormElementParser(), form_data_entry_parser.SubmitInputFormElementParser(), form_data_entry_parser.ButtonInputFormElementParser(), form_data_entry_parser.ImageInputFormElementParser(), form_data_entry_parser.ButtonFormElementParser(), form_data_entry_parser.InputFormElementParser(), form_data_entry_parser.SelectFormElementParser(), form_data_entry_parser.TextareaFormElementParser(), form_data_entry_parser.FormDataEntryParser(), ] bs4_parser = BeautifulSoup(markup,", "= val elif match_key == \"action\": form_data.action = val.strip() elif match_key == \"method\":", "The same objects are stored within the object. 
\"\"\" if parser is None:", "field_type = parsed_form_field.attrs.get(\"type\", None) for parser in field_parsers: if field_type is not None", "None if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is None:", "= parsed_field.find_parent(\"form\") if parent_form is not None: form_index = parsed_forms.index(parsed_form) if form_index is", "markup is not None: self.parse(markup, parser) def parse(self, markup: str, parser: str =", "form_index is not None: self.forms[form_index].fields.extend( self._create_form_data_field(parsed_field, parsers)) return self.forms def _create_form_data(self, parsed_form: Tag)", ":returns: A FormData object \"\"\" form_data = FormData() for key, val in parsed_form.attrs.items():", "\"\"\" Parse and extract HTML forms from a HTML page. \"\"\" def __init__(self,", "Form Data Entries from pasred form input element. :param parsed_form_field: A BeautifulSoup object", "= None if \"form\" in parsed_field.attrs: form_index = form_id_map.get(parsed_field.attrs[\"form\"], None) if form_index is", "= index self.forms.append(self._create_form_data(parsed_form)) # Fields associate to the nearest containing form node, or", "same objects are stored within the object. \"\"\" if parser is None: parser", "\"\"\" Create Form Data from parsed form node object. :param parsed_form: A BeautifulSoup", "== \"method\": form_data.method = val.strip().upper() elif match_key == \"enctype\": form_data.enctype = val.strip() return" ]
[ "self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }, ) self.accept()", "self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False)", "self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))),", "False, } ) self.accept() # Segfault? 
class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent)", "dict) sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size and", "index): if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp =", "parent def paint(self, painter, option, index): if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True)", "return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential =", "self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center()", "option, index): if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp", "self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) }", "\"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget)", "\"\"\" def __init__(self): # Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView()", "remove_credential(self): \"\"\" Removes a credential \"\"\" if 
self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str,", "in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect = Signal(dict) def", "self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self):", "__init__(self): # Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel =", "parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent def paint(self, painter, option, index): if not", "# Compose main layout mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\",", "item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def", "deepcopy from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin", "not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword)", "credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential", "QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = 
QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit() self.profiles", "sigAddCredential = Signal(str, dict) sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() #", "from qtpy.QtCore import * from qtpy.QtGui import * from qtpy.QtWidgets import * from", "hosts \"\"\" def __init__(self): # Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview =", "self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout = QFormLayout() if not", "self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\":", "if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum,", "= QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password)", "return credentials def fromState(self, state): self.connectionsmodel.clear() for name, credential in state.items(): item =", "= QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit() self.profiles = QComboBox()", "= QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\")", "= manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) 
self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False)", "= QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton =", "Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") #", "= CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential \"\"\" if self.listview.selectedIndexes():", "QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main", "# Setup fields self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit()", "connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\":", "self.password.text(), \"savepassword\": False, } ) self.accept() # Segfault? 
class ConnectDelegate(QItemDelegate): def __init__(self, parent):", "UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar", "* from qtpy.QtGui import * from qtpy.QtWidgets import * from xicam.gui.static import path", "self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton,", "= QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self):", "item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials =", "SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to configure connections to", "self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\",", "add_credential(self): \"\"\" Open the CamMart install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_()", "else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) 
self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False)", ") self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(),", "position # self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint", "self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit() self.password = QLineEdit()", "QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton", "self.profilename = QLineEdit() self.username = QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save", "mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\",", "} ) self.accept() # Segfault? class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent", "A built-in settings plugin to configure connections to other hosts \"\"\" def __init__(self):", "requests from qtpy.QtCore import * from qtpy.QtGui import * from qtpy.QtWidgets import *", "self.accept() # Segfault? 
class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent", "self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate", "= QLineEdit() self.username = QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\")", "QDialogButtonBox.RejectRole) # Compose main layout mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles)", "# self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint =", "self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout =", "def paint(self, painter, option, index): if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete", "state): self.connectionsmodel.clear() for name, credential in state.items(): item = QStandardItem(name) item.credential = credential", "install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes a", "QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else:", 
"self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\"", "{ \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } ) self.accept() #", "credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential", "credential in state.items(): item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item)", "a credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict): item", "QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin,", "def loadProfile(self): profilename = self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True)", "credential.get(\"savepassword\", False): credential[\"password\"] = None return credentials def fromState(self, state): self.connectionsmodel.clear() for name,", "i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect = Signal(dict)", 
"self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(),", "self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole)", "= QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit()", "\"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }, ) self.accept() def connect(self):", "self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for", "mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential in", "self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = deepcopy(self.credentials) for name, credential in credentials.items(): if", "QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() 
self.plugintoolbar.setOrientation(Qt.Vertical)", "self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(),", "plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open the", "state.items(): item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property", "= parent def paint(self, painter, option, index): if not self._parent.indexWidget(index): button = QToolButton(self.parent())", "QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog", "import deepcopy from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings", "layout mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host)", "= deepcopy(self.credentials) for name, credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None", "credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = deepcopy(self.credentials) for", "manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to configure connections to other", "\"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) 
self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential \"\"\"", "self.password.text(), \"savepassword\": False, }, ) self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( {", "and position # self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())", "profilename = self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else:", "paint(self, painter, option, index): if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\")", "credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False)", "self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host", "self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) sp.setWidthForHeight(True)", "self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit() self.password = QLineEdit() 
self.password.setEchoMode(QLineEdit.Password) self.savepassword =", "self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\":", "xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to configure", "CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row())", "in state.items(): item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset()", "frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename", "ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent def paint(self, painter, option,", "self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open the CamMart install", "= self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields", "mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate 
profiles for name,", "Removes a credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict):", "credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict): item =", "= name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i", "self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole)", "self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential", "self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) #", "Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton", "item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = deepcopy(self.credentials) for name,", "mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) 
self.setLayout(mainLayout) self.setWindowTitle(\"Add", "other hosts \"\"\" def __init__(self): # Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview", "sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size and position", "# Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\")", "self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def", "if credential.get(\"savepassword\", False): credential[\"password\"] = None return credentials def fromState(self, state): self.connectionsmodel.clear() for", "self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect", "def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent def paint(self, painter, option, index):", "self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton,", "= QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember", "\"\"\" Removes a credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential:", "self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) 
self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar =", "Profile\") # Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton =", "self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile)", "self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict): item = QStandardItem(name) item.credential = credential", "<reponame>JulReinhardt/Xi-cam import requests from qtpy.QtCore import * from qtpy.QtGui import * from qtpy.QtWidgets", "self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget)", "Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add)", "super(CredentialDialog, self).__init__() # Set size and position # self.setGeometry(0, 0, 800, 500) frameGm", "self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } ) self.accept()", "QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) sp.setWidthForHeight(True) button.setSizePolicy(sp) button.clicked.connect(index.data()) 
self._parent.setIndexWidget(index,", "self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename =", "self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\":", "xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy from xicam.plugins import SettingsPlugin, manager class", "for name, credential in state.items(): item = QStandardItem(name) item.credential = credential item.name =", "\"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self):", "self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(),", "self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } ) self.accept() # Segfault? 
class ConnectDelegate(QItemDelegate): def", "QLineEdit() self.username = QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile", "# Set modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if profilename == \"New...\":", "profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set", "if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict): item = QStandardItem(name) item.credential", "= credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = deepcopy(self.credentials)", "self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict): item = QStandardItem(name) item.credential =", "addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if", "_add_credential(self, name: str, credential: dict): item = QStandardItem(name) item.credential = credential item.name =", "\"\"\" Open the CamMart install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def", "profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = 
manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename]", "if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False,", "}, ) self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\":", "= QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username)", "Setup fields self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username", "QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\")", "def add_credential(self): \"\"\" Open the CamMart install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential)", "self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), {", "credentials = deepcopy(self.credentials) for name, credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] =", "False, }, ) self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(),", "class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent 
def paint(self, painter,", "mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles", "\"savepassword\": False, }, ) self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\":", "SearchLineEdit from copy import deepcopy from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\"", "import requests from qtpy.QtCore import * from qtpy.QtGui import * from qtpy.QtWidgets import", "name: str, credential: dict): item = QStandardItem(name) item.credential = credential item.name = name", "= QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget", "mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) #", "self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"])", "to configure connections to other hosts \"\"\" def __init__(self): # Setup UI self.widget", "to other hosts \"\"\" def __init__(self): # Setup UI self.widget = QWidget() 
self.widget.setLayout(QHBoxLayout())", "import * from qtpy.QtGui import * from qtpy.QtWidgets import * from xicam.gui.static import", "= QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if", "self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential)", "dict): item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index())", "for name, credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None return credentials", "loadProfile(self): profilename = self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True)", "dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential", ") self.accept() # Segfault? 
class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent =", "credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def", "buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close)", "def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size and position # self.setGeometry(0, 0,", "QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return", "name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in", "qtpy.QtWidgets import * from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy", "def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog):", "Connection...\") # Set modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if profilename ==", "copy import deepcopy from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in", "self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", 
\"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"])", "= QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove", "def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(),", "addmode=True): super(CredentialDialog, self).__init__() # Set size and position # self.setGeometry(0, 0, 800, 500)", "item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for", "self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText()", "fromState(self, state): self.connectionsmodel.clear() for name, credential in state.items(): item = QStandardItem(name) item.credential =", "button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) sp.setWidthForHeight(True) button.setSizePolicy(sp) button.clicked.connect(index.data()) self._parent.setIndexWidget(index, button)", "import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy from xicam.plugins import", "def __init__(self): # Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel", "mainLayout.addRow(\"Profile\", self.profilename) 
mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode: mainLayout.addRow(self.rememberprofile)", "credentials def fromState(self, state): self.connectionsmodel.clear() for name, credential in state.items(): item = QStandardItem(name)", "for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality", "item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def", "} class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect = Signal(dict) def __init__(self, addmode=True):", "self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))),", "QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials", "centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit() self.profiles =", "500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) #", 
"self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open the CamMart install dialog", "\"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } ) self.accept() # Segfault?", "not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)", "the CamMart install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\"", "def _add_credential(self, name: str, credential: dict): item = QStandardItem(name) item.credential = credential item.name", "mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout)", "name, credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None return credentials def", "QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout = QFormLayout() if not addmode:", "# Segfault? 
class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent def", "__init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent def paint(self, painter, option, index): if", "0, 800, 500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint)", "\"password\": self.password.text(), \"savepassword\": False, }, ) self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit(", "= QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar)", "button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) sp.setWidthForHeight(True) button.setSizePolicy(sp)", "item.index()) def toState(self): credentials = deepcopy(self.credentials) for name, credential in credentials.items(): if credential.get(\"savepassword\",", "not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items():", "= QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\"))) sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) sp.setWidthForHeight(True) button.setSizePolicy(sp) button.clicked.connect(index.data())", 
"deepcopy(self.credentials) for name, credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None return", "if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\",", "class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to configure connections to other hosts", "\"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"])", "\"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open", "self.username = QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile =", "= QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self):", "self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") #", "self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": 
self.password.text(),", "self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox()", "False): credential[\"password\"] = None return credentials def fromState(self, state): self.connectionsmodel.clear() for name, credential", "name, credential in state.items(): item = QStandardItem(name) item.credential = credential item.name = name", "toState(self): credentials = deepcopy(self.credentials) for name, credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"]", "= Signal(str, dict) sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set", "plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def", "modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True)", "def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False,", "name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = deepcopy(self.credentials) for name, credential in", "Segfault? 
class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate, self).__init__(parent) self._parent = parent def paint(self,", "QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit() self.password =", "from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy from xicam.plugins import SettingsPlugin, manager", "settings plugin to configure connections to other hosts \"\"\" def __init__(self): # Setup", "import * from qtpy.QtWidgets import * from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import", "Set modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True)", "credential: dict): item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(),", "self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if profilename", "if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout", "@property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class", "self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: 
self.buttonboxWidget.addButton(self.connectButton,", "credential[\"password\"] = None return credentials def fromState(self, state): self.connectionsmodel.clear() for name, credential in", "in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None return credentials def fromState(self, state):", "\"savepassword\": False, } ) self.accept() # Segfault? class ConnectDelegate(QItemDelegate): def __init__(self, parent): super(ConnectDelegate,", "manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def", "QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode:", "Compose main layout mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename)", "add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, },", "# Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel = QStandardItemModel()", "= QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) 
self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget = QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole)", "self).__init__(parent) self._parent = parent def paint(self, painter, option, index): if not self._parent.indexWidget(index): button", "self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } ) self.accept() # Segfault? class", "from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to", "self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } )", "= QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit() self.password", "self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit( self.profilename.text(), { \"host\":", "self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\":", "* from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy", "mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not", "credential in credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None return 
credentials def fromState(self,", "addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout", "QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout = QFormLayout()", "credentials.items(): if credential.get(\"savepassword\", False): credential[\"password\"] = None return credentials def fromState(self, state): self.connectionsmodel.clear()", "self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))),", "def toState(self): credentials = deepcopy(self.credentials) for name, credential in credentials.items(): if credential.get(\"savepassword\", False):", "path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy from xicam.plugins import SettingsPlugin,", "Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size and position # self.setGeometry(0,", "QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit() self.password = QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword", "\"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, } ) self.accept() # Segfault? 
class ConnectDelegate(QItemDelegate):", "qtpy.QtCore import * from qtpy.QtGui import * from qtpy.QtWidgets import * from xicam.gui.static", "plugin to configure connections to other hosts \"\"\" def __init__(self): # Setup UI", "self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }, ) self.accept() def connect(self): if self.rememberprofile.isChecked(): self.add()", "painter, option, index): if not self._parent.indexWidget(index): button = QToolButton(self.parent()) button.setAutoRaise(True) button.setText(\"Delete Operation\") button.setIcon(QIcon(path(\"icons/trash.png\")))", "None return credentials def fromState(self, state): self.connectionsmodel.clear() for name, credential in state.items(): item", "super(ConnectDelegate, self).__init__(parent) self._parent = parent def paint(self, painter, option, index): if not self._parent.indexWidget(index):", "self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if", "= QDialogButtonBox() if addmode: self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole) else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose", "Signal(str, dict) sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size", "import SearchLineEdit from copy import deepcopy from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin):", "self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username = QLineEdit()", "from qtpy.QtWidgets import * from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from", "= name self.connectionsmodel.appendRow(item) 
self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = deepcopy(self.credentials) for name, credential", "str, credential: dict): item = QStandardItem(name) item.credential = credential item.name = name self.connectionsmodel.appendRow(item)", "range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect = Signal(dict) def __init__(self,", "self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount())", "QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\")", "QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\",", "QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect) self.cancelButton.clicked.connect(self.close) self.profiles.currentTextChanged.connect(self.loadProfile) self.buttonboxWidget =", "Open the CamMart install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self):", "qtpy.QtGui import * from qtpy.QtWidgets import * from xicam.gui.static import path from xicam.gui.widgets.searchlineedit", "800, 500) frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() 
frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft())", "= Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size and position #", "__init__(self, addmode=True): super(CredentialDialog, self).__init__() # Set size and position # self.setGeometry(0, 0, 800,", "self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\",", "dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton = QPushButton(\"C&onnect\") self.cancelButton = QPushButton(\"&Cancel\") self.addButton.clicked.connect(self.add) self.connectButton.clicked.connect(self.connect)", "if not addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\",", "in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def loadProfile(self):", "== \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"])", "CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog, self).__init__()", "for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect =", "self.widget) def add_credential(self): \"\"\" Open the CamMart install dialog \"\"\" self._dialog = CredentialDialog()", "def remove_credential(self): \"\"\" Removes a credential \"\"\" if 
self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name:", "QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential)", "self.rememberprofile.isChecked(): self.add() self.sigConnect.emit( { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }", "if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password)", "= QLineEdit() self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup", "QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\",", "{ \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }, ) self.accept() def", "= credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return { self.connectionsmodel.item(i).name:", "self.setModal(True) def loadProfile(self): profilename = self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True)", "Setup UI self.widget = QWidget() self.widget.setLayout(QHBoxLayout()) 
self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel)", "class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict) sigConnect = Signal(dict) def __init__(self, addmode=True): super(CredentialDialog,", "= QWidget() self.widget.setLayout(QHBoxLayout()) self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar()", "built-in settings plugin to configure connections to other hosts \"\"\" def __init__(self): #", "# Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\")", "connections to other hosts \"\"\" def __init__(self): # Setup UI self.widget = QWidget()", "from qtpy.QtGui import * from qtpy.QtWidgets import * from xicam.gui.static import path from", "def fromState(self, state): self.connectionsmodel.clear() for name, credential in state.items(): item = QStandardItem(name) item.credential", "* from qtpy.QtWidgets import * from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit", "\"\"\" A built-in settings plugin to configure connections to other hosts \"\"\" def", "ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to configure connections to other hosts \"\"\"", "import * from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import", "name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True)", "fields self.host = QLineEdit() self.profiles = QComboBox() self.profiles.addItem(\"New...\") self.profilename = QLineEdit() self.username =", "main 
layout mainLayout = QFormLayout() if not addmode: mainLayout.addRow(\"Profile\", self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\",", "super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open the CamMart install dialog \"\"\"", "import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A built-in settings plugin to configure connections", "self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential \"\"\" if", "screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup fields self.host =", "size and position # self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry() screen =", "= self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential", "= None return credentials def fromState(self, state): self.connectionsmodel.clear() for name, credential in state.items():", "from copy import deepcopy from xicam.plugins import SettingsPlugin, manager class ConnectionSettingsPlugin(SettingsPlugin): \"\"\" A", "self.password.setEchoMode(QLineEdit.Password) self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog buttons", "QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton = QPushButton(\"&Add\")", "self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) 
self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\", self.add_credential) self.plugintoolbar.addAction(QIcon(str(path(\"icons/minus.png\"))), \"Remove plugin\", self.remove_credential) self.widget.layout().addWidget(self.listview)", "self.connectionsmodel.clear() for name, credential in state.items(): item = QStandardItem(name) item.credential = credential item.name", "self.listview = QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add", "\"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def loadProfile(self): profilename =", "\"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self, name: str, credential: dict): item = QStandardItem(name)", "self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename)", "self._dialog.exec_() def remove_credential(self): \"\"\" Removes a credential \"\"\" if self.listview.selectedIndexes(): self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row()) def _add_credential(self,", "addmode: mainLayout.addRow(self.rememberprofile) mainLayout.addRow(self.buttonboxWidget) # Populate profiles for name, credential in manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): 
self.profiles.addItem(name)", "self.rememberprofile.setVisible(True) else: credential = manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials[profilename] self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False)", "Set size and position # self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry() screen", "self.remove_credential) self.widget.layout().addWidget(self.listview) self.widget.layout().addWidget(self.plugintoolbar) super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open the CamMart", "self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str, dict)", "xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy from xicam.plugins", "self.sigAddCredential.emit( self.profilename.text(), { \"host\": self.host.text(), \"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }, )", "self.username.setText(credential[\"username\"]) self.host.setText(credential[\"host\"]) self.password.setText(credential[\"password\"]) self.savepassword.setChecked(credential[\"savepassword\"]) self.profilename.setText(profilename) self.username.setEnabled(False) self.password.setEnabled(False) self.host.setEnabled(False) self.savepassword.setEnabled(False) self.rememberprofile.setVisible(False) def add(self): self.sigAddCredential.emit(", "self.connectionsmodel.appendRow(item) self.connectionsmodel.dataChanged.emit(item.index(), item.index()) def toState(self): credentials = 
deepcopy(self.credentials) for name, credential in credentials.items():", "configure connections to other hosts \"\"\" def __init__(self): # Setup UI self.widget =", "{ self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential for i in range(self.connectionsmodel.rowCount()) } class CredentialDialog(QDialog): sigAddCredential = Signal(str,", "self.savepassword = QCheckBox(\"Save Password\") self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton", "manager.get_plugin_by_name(\"Connections\", \"SettingsPlugin\").credentials.items(): self.profiles.addItem(name) self.setLayout(mainLayout) self.setWindowTitle(\"Add Connection...\") # Set modality self.setModal(True) def loadProfile(self): profilename", "\"Connections\", self.widget) def add_credential(self): \"\"\" Open the CamMart install dialog \"\"\" self._dialog =", "CamMart install dialog \"\"\" self._dialog = CredentialDialog() self._dialog.sigAddCredential.connect(self._add_credential) self._dialog.exec_() def remove_credential(self): \"\"\" Removes", "self).__init__() # Set size and position # self.setGeometry(0, 0, 800, 500) frameGm =", "self.rememberprofile = QCheckBox(\"Remember Profile\") # Setup dialog buttons self.addButton = QPushButton(\"&Add\") self.connectButton =", "item.credential = credential item.name = name self.connectionsmodel.appendRow(item) self.listview.reset() @property def credentials(self): return {", "frameGm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() frameGm.moveCenter(centerPoint) self.move(frameGm.topLeft()) # Setup", "# Set size and position # self.setGeometry(0, 0, 800, 500) frameGm = self.frameGeometry()", "from xicam.gui.static import path from xicam.gui.widgets.searchlineedit import SearchLineEdit from copy import deepcopy from", "self.host.text(), \"username\": self.username.text(), 
\"password\": self.password.text(), \"savepassword\": False, }, ) self.accept() def connect(self): if", "self).__init__(QIcon(str(path(\"icons/server.png\"))), \"Connections\", self.widget) def add_credential(self): \"\"\" Open the CamMart install dialog \"\"\" self._dialog", "else: self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole) self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole) # Compose main layout mainLayout = QFormLayout() if", "\"password\": self.password.text(), \"savepassword\": False, } ) self.accept() # Segfault? class ConnectDelegate(QItemDelegate): def __init__(self,", "self.profiles) mainLayout.addRow(\"Profile\", self.profilename) mainLayout.addRow(\"Host\", self.host) mainLayout.addRow(\"Username\", self.username) mainLayout.addRow(\"Password\", self.password) mainLayout.addRow(self.savepassword) if not addmode:", "self.profiles.currentText() if profilename == \"New...\": self.username.setEnabled(True) self.password.setEnabled(True) self.host.setEnabled(True) self.savepassword.setEnabled(True) self.rememberprofile.setVisible(True) else: credential =", "self._parent = parent def paint(self, painter, option, index): if not self._parent.indexWidget(index): button =", "\"username\": self.username.text(), \"password\": self.password.text(), \"savepassword\": False, }, ) self.accept() def connect(self): if self.rememberprofile.isChecked():", "= QListView() self.connectionsmodel = QStandardItemModel() self.listview.setModel(self.connectionsmodel) self.plugintoolbar = QToolBar() self.plugintoolbar.setOrientation(Qt.Vertical) self.plugintoolbar.addAction(QIcon(str(path(\"icons/plus.png\"))), \"Add plugin\"," ]
[ "'%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget %s -O %s_chn.zip' % (link,", "-O %s_ann.zip' % (link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' %", "in seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s", "link = '%s%s' % (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s -O %s_ann.zip'", "seqs = data['sequences'] for v in seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download", "%s_ann.zip' % (link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link)", "v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget %s -O", "data_file = './description.json' with open(data_file) as f: data = json.load(f) home_page = data['homepage']", "v in seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget", "'%s%s' % (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s -O %s_ann.zip' % (link,", "requests data_file = './description.json' with open(data_file) as f: data = json.load(f) home_page =", "link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget %s -O %s_chn.zip'", "json.load(f) home_page = data['homepage'] seqs = data['sequences'] for v in seqs: link =", "%s' % link) os.system('wget %s -O %s_ann.zip' % (link, v['name'])) link = '%s%s'", "data = json.load(f) home_page = data['homepage'] seqs = data['sequences'] for v in seqs:", "%s -O %s_ann.zip' % (link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s'", "= './description.json' with open(data_file) as f: data = json.load(f) home_page = data['homepage'] seqs", "import requests data_file = './description.json' with open(data_file) as f: data = json.load(f) 
home_page", "link) os.system('wget %s -O %s_ann.zip' % (link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url'])", "home_page = data['homepage'] seqs = data['sequences'] for v in seqs: link = '%s%s'", "open(data_file) as f: data = json.load(f) home_page = data['homepage'] seqs = data['sequences'] for", "json import os import requests data_file = './description.json' with open(data_file) as f: data", "for v in seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download %s' % link)", "= data['homepage'] seqs = data['sequences'] for v in seqs: link = '%s%s' %", "seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s -O", "as f: data = json.load(f) home_page = data['homepage'] seqs = data['sequences'] for v", "(link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget %s", "f: data = json.load(f) home_page = data['homepage'] seqs = data['sequences'] for v in", "= data['sequences'] for v in seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download %s'", "'./description.json' with open(data_file) as f: data = json.load(f) home_page = data['homepage'] seqs =", "with open(data_file) as f: data = json.load(f) home_page = data['homepage'] seqs = data['sequences']", "= '%s%s' % (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s -O %s_ann.zip' %", "(home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s -O %s_ann.zip' % (link, v['name'])) link", "% (link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget", "% (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget %s -O %s_chn.zip' % (link, v['name']))", "os.system('wget %s -O %s_ann.zip' % (link, v['name'])) link = '%s%s' % (home_page,v['channels']['color']['url']) print('download", "import json import os import 
requests data_file = './description.json' with open(data_file) as f:", "% (home_page,v['annotations']['url']) print('download %s' % link) os.system('wget %s -O %s_ann.zip' % (link, v['name']))", "data['homepage'] seqs = data['sequences'] for v in seqs: link = '%s%s' % (home_page,v['annotations']['url'])", "import os import requests data_file = './description.json' with open(data_file) as f: data =", "data['sequences'] for v in seqs: link = '%s%s' % (home_page,v['annotations']['url']) print('download %s' %", "% link) os.system('wget %s -O %s_ann.zip' % (link, v['name'])) link = '%s%s' %", "= json.load(f) home_page = data['homepage'] seqs = data['sequences'] for v in seqs: link", "print('download %s' % link) os.system('wget %s -O %s_ann.zip' % (link, v['name'])) link =", "= '%s%s' % (home_page,v['channels']['color']['url']) print('download %s' % link) os.system('wget %s -O %s_chn.zip' %", "os import requests data_file = './description.json' with open(data_file) as f: data = json.load(f)" ]
[ "time import colorsys # Get the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne =", "colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime =", "(i < (hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb =", "colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount):", "reverse = bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime = max(0.1, rotationTime) colorsCount =", "= float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount =", "increment increment = 3 sleepTime = rotationTime / hyperion.ledCount while sleepTime < 0.05:", "<= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i", "for i in range(hyperion.ledCount): if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2])", "rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0,", "Start the write data loop while not hyperion.abort(): hyperion.setColor(ledData) ledData = ledData[-increment:] +", "rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2)", "parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255))", "ledData = bytearray() for i in range(hyperion.ledCount): if i <= colorsCount: rgb =", "False)) # Check parameters rotationTime = max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) #", "colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = 
(0,0,0) ledData = bytearray() for i in range(hyperion.ledCount):", "rotationTime = max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the led data", "direction if needed if reverse: increment = -increment # Start the write data", "int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time and rotation increment increment = 3", "2 increment %= hyperion.ledCount # Switch direction if needed if reverse: increment =", "colorTwo[2]/255.0) colorBlack = (0,0,0) ledData = bytearray() for i in range(hyperion.ledCount): if i", "write data loop while not hyperion.abort(): hyperion.setColor(ledData) ledData = ledData[-increment:] + ledData[:-increment] time.sleep(sleepTime)", "colorsys # Get the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0))", "bytearray() for i in range(hyperion.ledCount): if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1],", "= hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse =", "# Initialize the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0,", "(0,0,0) ledData = bytearray() for i in range(hyperion.ledCount): if i <= colorsCount: rgb", "Get the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo =", "colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0,", "hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse',", "= min(hyperion.ledCount/2, colorsCount) # Initialize the led data hsv1 = 
colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0)", "(hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack ledData", "reverse: increment = -increment # Start the write data loop while not hyperion.abort():", "the write data loop while not hyperion.abort(): hyperion.setColor(ledData) ledData = ledData[-increment:] + ledData[:-increment]", "increment = -increment # Start the write data loop while not hyperion.abort(): hyperion.setColor(ledData)", "rotation increment increment = 3 sleepTime = rotationTime / hyperion.ledCount while sleepTime <", "colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse", "colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i <", "= colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData = bytearray() for i in", "sleepTime < 0.05: increment *= 2 sleepTime *= 2 increment %= hyperion.ledCount #", "-increment # Start the write data loop while not hyperion.abort(): hyperion.setColor(ledData) ledData =", "sleep time and rotation increment increment = 3 sleepTime = rotationTime / hyperion.ledCount", "2 sleepTime *= 2 increment %= hyperion.ledCount # Switch direction if needed if", "*= 2 sleepTime *= 2 increment %= hyperion.ledCount # Switch direction if needed", "colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) #", "if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1)", "hsv2[1], hsv2[2]) else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate", "import time import colorsys # Get the parameters rotationTime = 
float(hyperion.args.get('rotation-time', 2.0)) colorOne", "# Switch direction if needed if reverse: increment = -increment # Start the", "import hyperion import time import colorsys # Get the parameters rotationTime = float(hyperion.args.get('rotation-time',", "= colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData", "the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0)", "increment *= 2 sleepTime *= 2 increment %= hyperion.ledCount # Switch direction if", "= max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the led data hsv1", "sleepTime = rotationTime / hyperion.ledCount while sleepTime < 0.05: increment *= 2 sleepTime", "= (0,0,0) ledData = bytearray() for i in range(hyperion.ledCount): if i <= colorsCount:", "colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]),", "max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the led data hsv1 =", "hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0)", "(255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False))", "else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep", "if reverse: increment = -increment # Start the write data loop while not", "rotationTime / hyperion.ledCount while sleepTime < 0.05: increment *= 2 sleepTime *= 2", "Check parameters rotationTime = max(0.1, rotationTime) 
colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the", "colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) #", "the sleep time and rotation increment increment = 3 sleepTime = rotationTime /", "0.05: increment *= 2 sleepTime *= 2 increment %= hyperion.ledCount # Switch direction", "= bytearray() for i in range(hyperion.ledCount): if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0],", "hyperion import time import colorsys # Get the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0))", "hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData = bytearray() for i", "sleepTime *= 2 increment %= hyperion.ledCount # Switch direction if needed if reverse:", "i in range(hyperion.ledCount): if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif", "and rotation increment increment = 3 sleepTime = rotationTime / hyperion.ledCount while sleepTime", "colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData =", "(i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1],", "# Get the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo", "2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2)", "rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount", "& (i < (hyperion.ledCount/2) + colorsCount): rgb = 
colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb", "the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two',", "= rotationTime / hyperion.ledCount while sleepTime < 0.05: increment *= 2 sleepTime *=", "parameters rotationTime = max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize the led", "*= 2 increment %= hyperion.ledCount # Switch direction if needed if reverse: increment", "colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData = bytearray() for i in range(hyperion.ledCount): if", "ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time and rotation increment", "bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime = max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount)", "int(255*rgb[2]))) # Calculate the sleep time and rotation increment increment = 3 sleepTime", "= colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) +", "bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time and rotation increment increment =", "data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack =", "# Calculate the sleep time and rotation increment increment = 3 sleepTime =", "# Check parameters rotationTime = max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2, colorsCount) # Initialize", "while sleepTime < 0.05: increment *= 2 sleepTime *= 2 increment %= hyperion.ledCount", "min(hyperion.ledCount/2, colorsCount) # Initialize the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2", "increment = 3 sleepTime = rotationTime / hyperion.ledCount while sleepTime < 
0.05: increment", "= bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime = max(0.1, rotationTime) colorsCount = min(hyperion.ledCount/2,", "colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time and rotation", "+ colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack ledData +=", "= -increment # Start the write data loop while not hyperion.abort(): hyperion.setColor(ledData) ledData", "led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack", "%= hyperion.ledCount # Switch direction if needed if reverse: increment = -increment #", "= hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime = max(0.1,", "< 0.05: increment *= 2 sleepTime *= 2 increment %= hyperion.ledCount # Switch", "3 sleepTime = rotationTime / hyperion.ledCount while sleepTime < 0.05: increment *= 2", "colorBlack = (0,0,0) ledData = bytearray() for i in range(hyperion.ledCount): if i <=", "= colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time and", "increment %= hyperion.ledCount # Switch direction if needed if reverse: increment = -increment", "needed if reverse: increment = -increment # Start the write data loop while", "Calculate the sleep time and rotation increment increment = 3 sleepTime = rotationTime", "= hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check", "if needed if reverse: increment = -increment # Start the write data loop", "+= bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time and rotation increment increment", 
"float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one', (255,0,0)) colorTwo = hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count',", "Initialize the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0,", "colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData = bytearray() for", "colorOne[1]/255.0, colorOne[2]/255.0) hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0) colorBlack = (0,0,0) ledData = bytearray()", "range(hyperion.ledCount): if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >=", "rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the sleep time", "< (hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack", "hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime = max(0.1, rotationTime) colorsCount", "import colorsys # Get the parameters rotationTime = float(hyperion.args.get('rotation-time', 2.0)) colorOne = hyperion.args.get('color_one',", "# Start the write data loop while not hyperion.abort(): hyperion.setColor(ledData) ledData = ledData[-increment:]", "elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0],", ">= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2])", "hyperion.ledCount while sleepTime < 0.05: increment *= 2 sleepTime *= 2 increment %=", "= colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2])))", "hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i 
< (hyperion.ledCount/2) + colorsCount): rgb", "hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount): rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else:", "i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i >= hyperion.ledCount/2-1) &", "(0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime", "colorsCount) # Initialize the led data hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0) hsv2 =", "time and rotation increment increment = 3 sleepTime = rotationTime / hyperion.ledCount while", "= 3 sleepTime = rotationTime / hyperion.ledCount while sleepTime < 0.05: increment *=", "hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check parameters rotationTime = max(0.1, rotationTime)", "rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2]) else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]),", "/ hyperion.ledCount while sleepTime < 0.05: increment *= 2 sleepTime *= 2 increment", "in range(hyperion.ledCount): if i <= colorsCount: rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2]) elif (i", "hsv2[2]) else: rgb = colorBlack ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2]))) # Calculate the", "hyperion.ledCount # Switch direction if needed if reverse: increment = -increment # Start", "hsv1[2]) elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount): rgb =", "Switch direction if needed if reverse: increment = -increment # Start the write", "hyperion.args.get('color_two', (0,0,255)) colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2) reverse = bool(hyperion.args.get('reverse', False)) # Check parameters" ]
[ "length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) # Log the", "= 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf def main():", "--- # [Attention with Linear Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch", "import experiment, tracker from labml.configs import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from", "torch.Tensor): \"\"\" Log losses at the initial and final tokens \"\"\" # If", "Train for $32$ epochs 'epochs': 128, # Batch size $128$ 'batch_size': 128, #", "'GPT_ALiBi' # Longer validation set valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader' def", "at the initial and final tokens \"\"\" # If there are more tokens", "token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi", "transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation set valid_seq_len: int = 128 valid_loader", "[configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the vocabulary sizes for embeddings", "data loader with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch,", "mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader)", "loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with experiment.start(): # Run training conf.run()", "AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import 
TransformerConfigs from labml_nn.transformers.gpt import Configs", "'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set", "attention mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha'", "ALiBi doesn't use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' # Set", "shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer configurations \"\"\"", "SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import TransformerConfigs", "use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the vocabulary", "other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses at the initial and final", "Linear Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch experiment to train a", "- 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention module", "uses GELU activation for position wise feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't", "### ALiBi based Transformer configurations \"\"\" # We use our # [configurable transformer", "$128$ 'batch_size': 128, # Switch between training and validation for $10$ times #", "[Attention with Linear Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch experiment to", "dropout_prob=c.dropout) # Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha',", "the loss at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) 
def _alibi_mha(c:", "token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss at the final token tracker.add(f'loss.{int(output.shape[0])", "mechanism. \"\"\" # ALiBi based transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi' #", "\"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn,", "conf = TransformerConfigs() # Set the vocabulary sizes for embeddings and generating logits", "Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn,", "@option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer configurations \"\"\" #", "the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create", "Log the loss at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def", "1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) # Log the loss at the", "'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf def main(): #", "title: Attention with Linear Biases (ALiBi) Experiment summary: This experiment trains an Attention", "size of $128$ 'valid_seq_len': 80, # Train for $32$ epochs 'epochs': 128, #", "configurations experiment.configs(conf, { # Use character level tokenizer 'tokenizer': 'character', # Prompt separator", "position wise feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't use positional embeddings conf.src_embed", "(during validation), if self.seq_len < output.shape[0]: # Log the loss at training sequence", "`valid_seq_len` sequence length \"\"\" return 
DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi')", "experiment with experiment.start(): # Run training conf.run() # if __name__ == '__main__': main()", "def main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs() #", "Log losses at the initial and final tokens \"\"\" # If there are", "and change the attention mechanism. \"\"\" # ALiBi based transformer (defined below) transformer:", "Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch experiment to train a [ALiBi", "If there are more tokens that the training sequence length (during validation), if", "# ALiBi based transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation", "'valid_seq_len': 80, # Train for $32$ epochs 'epochs': 128, # Batch size $128$", "the attention mechanism. \"\"\" # ALiBi based transformer (defined below) transformer: TransformerConfigs =", "self.loss_func(output[0], target[0])) # Log the loss at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.',", "Configurations We extend [GPT configurations](../gpt/index.html) and change the attention mechanism. 
\"\"\" # ALiBi", "logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses GELU activation for", "of $128$ 'seq_len': 64, # Use a context size of $128$ 'valid_seq_len': 80,", "Start the experiment with experiment.start(): # Run training conf.run() # if __name__ ==", "context size of $128$ 'valid_seq_len': 80, # Train for $32$ epochs 'epochs': 128,", "Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, })", "# GPT uses GELU activation for position wise feedforward conf.ffn.activation = 'GELU' #", "main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs() # Override", "[ALiBi model](index.html). This is based on [our GPT model](../gpt/index.html). [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import", "import DataLoader from labml import experiment, tracker from labml.configs import option, calculate from", "This experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny", "# Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1,", "tokens \"\"\" # If there are more tokens that the training sequence length", "Override configurations experiment.configs(conf, { # Use character level tokenizer 'tokenizer': 'character', # Prompt", "model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import DataLoader from labml import", "DataLoader from labml import experiment, tracker from labml.configs import option, calculate from labml_helpers.datasets.text", "conf = Configs() # Override configurations experiment.configs(conf, { # Use character level tokenizer", "Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs() # Override configurations experiment.configs(conf,", "def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model,", "with Linear Biases (ALiBi) Experiment summary: This experiment trains an Attention with Linear", "tracker from labml.configs import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import", "of $128$ 'valid_seq_len': 80, # Train for $32$ epochs 'epochs': 128, # Batch", "attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)", "# Longer validation set valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self,", "trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset.", "size of $128$ 'seq_len': 64, # Use a context size of $128$ 'valid_seq_len':", "tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss at the final token tracker.add(f'loss.{int(output.shape[0]) -", "= 'GELU' # ALiBi doesn't use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed =", "positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' # Set all attention mechanisms", "80, # Train for $32$ epochs 'epochs': 128, # 
Batch size $128$ 'batch_size':", "configurations](../gpt/index.html) and change the attention mechanism. \"\"\" # ALiBi based transformer (defined below)", "TransformerConfigs): \"\"\" Create an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) #", "'epochs': 128, # Batch size $128$ 'batch_size': 128, # Switch between training and", "AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)", "final tokens \"\"\" # If there are more tokens that the training sequence", "the loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss", "# Set the vocabulary sizes for embeddings and generating logits conf.n_src_vocab = c.n_tokens", "the loss at training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len", "Use a context size of $128$ 'seq_len': 64, # Use a context size", "tokenizer 'tokenizer': 'character', # Prompt separator is blank 'prompt_separator': '', # Starting prompt", "# return conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf", "epochs 'epochs': 128, # Batch size $128$ 'batch_size': 128, # Switch between training", "attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention mechanisms to", "transpose_batch from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs):", "= 'no_pos' conf.tgt_embed = 'no_pos' # Set all attention mechanisms to ALiBi conf.encoder_attn", "from torch.utils.data import DataLoader from labml import experiment, tracker from labml.configs import option,", "and final tokens \"\"\" # If there are more tokens that the training", "'text': 'tiny_shakespeare', # 'text': 
'tiny_shakespeare_no_split', # Use a context size of $128$ 'seq_len':", "'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set models for", "import torch from torch.utils.data import DataLoader from labml import experiment, tracker from labml.configs", "'no_pos' conf.tgt_embed = 'no_pos' # Set all attention mechanisms to ALiBi conf.encoder_attn =", "extend [GPT configurations](../gpt/index.html) and change the attention mechanism. \"\"\" # ALiBi based transformer", "conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf def main(): # Create", "mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' #", "embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' # Set all attention mechanisms to", "128, # Switch between training and validation for $10$ times # per epoch", "summary: This experiment trains an Attention with Linear Biases (ALiBi) based model on", "# Batch size $128$ 'batch_size': 128, # Switch between training and validation for", "and loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with experiment.start(): # Run training", "dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use a context size of $128$", "from labml.configs import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention", "c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses GELU activation for position wise feedforward", "experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs() # Override configurations experiment.configs(conf, { #", "'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data", "Set all attention 
mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn", "length (during validation), if self.seq_len < output.shape[0]: # Log the loss at training", "# Create configs conf = Configs() # Override configurations experiment.configs(conf, { # Use", "4, 'transformer.dropout': 0.1, }) # Set models for saving and loading experiment.add_pytorch_models({'model': conf.model})", "Use character level tokenizer 'tokenizer': 'character', # Prompt separator is blank 'prompt_separator': '',", "# Train for $32$ epochs 'epochs': 128, # Batch size $128$ 'batch_size': 128,", "based model on Tiny Shakespeare dataset. --- # [Attention with Linear Biases (ALiBi)](index.html)", "# [Attention with Linear Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch experiment", "'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use a context size of $128$ 'seq_len': 64,", "blank 'prompt_separator': '', # Starting prompt for sampling 'prompt': 'It is ', #", "for saving and loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with experiment.start(): #", "tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention", "GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html) and change the", "times # per epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512,", "conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf def", "[our GPT model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import DataLoader from", "Log the loss at training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1],", "collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer configurations", "< output.shape[0]: # Log the loss at training sequence length tracker.add(f'loss.{self.seq_len - 1}.',", "all attention mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn =", "the experiment with experiment.start(): # Run training conf.run() # if __name__ == '__main__':", "[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import DataLoader from labml import experiment,", "sampling 'prompt': 'It is ', # Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare', #", "if self.seq_len < output.shape[0]: # Log the loss at training sequence length tracker.add(f'loss.{self.seq_len", "8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set models for saving and loading", "transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the vocabulary sizes for embeddings and", "Longer validation set valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output:", "conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs()", "use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' # Set all attention", "\"\"\" Log losses at the initial and final tokens \"\"\" # If there", "labml_helpers.datasets.text import SequentialUnBatchedDataset from 
labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers", "are more tokens that the training sequence length (during validation), if self.seq_len <", "generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses GELU activation", "target[self.seq_len - 1])) # Log the loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0],", "Set the vocabulary sizes for embeddings and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab", "a context size of $128$ 'seq_len': 64, # Use a context size of", "= 'alibi_mha' # return conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create", "GPT model](../gpt/index.html). [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import DataLoader from labml", "import TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations", "seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi based", "vocabulary sizes for embeddings and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens", "128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses", "Set models for saving and loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with", "labml import experiment, tracker from labml.configs import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset", "'GELU' # ALiBi doesn't use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos'", 
"labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs", "conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses GELU activation for position", "Use a context size of $128$ 'valid_seq_len': 80, # Train for $32$ epochs", "output.shape[0]: # Log the loss at training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len", "models for saving and loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with experiment.start():", "from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\"", "transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation set valid_seq_len: int", "'no_pos' # Set all attention mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn =", "'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader with `valid_seq_len`", "ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c:", "', # Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use", "annotated PyTorch experiment to train a [ALiBi model](index.html). 
This is based on [our", "Shakespeare dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use a context size of", "# Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)", "{ # Use character level tokenizer 'tokenizer': 'character', # Prompt separator is blank", "is ', # Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', #", "\"\"\" ### ALiBi based Transformer configurations \"\"\" # We use our # [configurable", "calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader with", "Tiny Shakespeare dataset. --- # [Attention with Linear Biases (ALiBi)](index.html) Experiment This is", "from labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We extend", "labml.configs import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from", "# Switch between training and validation for $10$ times # per epoch 'inner_iterations':", "sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def", "'seq_len': 64, # Use a context size of $128$ 'valid_seq_len': 80, # Train", "Attention with Linear Biases (ALiBi) Experiment summary: This experiment trains an Attention with", "$10$ times # per epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff':", "# Set models for saving and loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment", "_alibi_mha) 
calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader", "TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We", "conf.decoder_mem_attn = 'alibi_mha' # return conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\") #", "sequence length (during validation), if self.seq_len < output.shape[0]: # Log the loss at", "= 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf def main(): # Create experiment", "def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses at the initial and", "Batch size $128$ 'batch_size': 128, # Switch between training and validation for $10$", "1])) # Log the loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) #", "--- title: Attention with Linear Biases (ALiBi) Experiment summary: This experiment trains an", "0.1, }) # Set models for saving and loading experiment.add_pytorch_models({'model': conf.model}) # Start", "This is an annotated PyTorch experiment to train a [ALiBi model](index.html). 
This is", "ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention mechanisms", "validation data loader with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size,", "experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare", "'tiny_shakespeare_no_split', # Use a context size of $128$ 'seq_len': 64, # Use a", "# Use a context size of $128$ 'valid_seq_len': 80, # Train for $32$", "\"\"\" Shuffled validation data loader with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text,", "import transpose_batch from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs class", "conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' # Set all attention mechanisms to ALiBi", "based on [our GPT model](../gpt/index.html). [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import", "We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the", "def _transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer configurations \"\"\" # We use", "for sampling 'prompt': 'It is ', # Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare',", "an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset. ---", "is based on [our GPT model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data", "ALiBi based transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation set", "torch.utils.data import DataLoader from labml import experiment, tracker from labml.configs import option, calculate", "calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation", "configurations \"\"\" # We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs()", "# Prompt separator is blank 'prompt_separator': '', # Starting prompt for sampling 'prompt':", "## Configurations We extend [GPT configurations](../gpt/index.html) and change the attention mechanism. 
\"\"\" #", "# 'text': 'tiny_shakespeare_no_split', # Use a context size of $128$ 'seq_len': 64, #", "= 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log", "_alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)", "target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads,", "loss at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs):", "ALiBi based Transformer configurations \"\"\" # We use our # [configurable transformer implementation](../configs.html#TransformerConfigs)", "Configs): \"\"\" Shuffled validation data loader with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid,", "# Start the experiment with experiment.start(): # Run training conf.run() # if __name__", "level tokenizer 'tokenizer': 'character', # Prompt separator is blank 'prompt_separator': '', # Starting", "Biases (ALiBi) based model on Tiny Shakespeare dataset. --- # [Attention with Linear", "TransformerConfigs() # Set the vocabulary sizes for embeddings and generating logits conf.n_src_vocab =", "on [our GPT model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import DataLoader", "\"\"\" Create an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set", "torch from torch.utils.data import DataLoader from labml import experiment, tracker from labml.configs import", "from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import Configs as", "= TransformerConfigs() # Set the vocabulary sizes for embeddings and generating logits conf.n_src_vocab", "and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses GELU", "as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html) and change", "= Configs() # Override configurations experiment.configs(conf, { # Use character level tokenizer 'tokenizer':", "\"\"\" # We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() #", "'alibi_mha' # return conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create configs", "doesn't use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' # Set all", "sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) # Log", "class Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html) and change the attention", "GELU activation for position wise feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't use", "(ALiBi) Experiment summary: This experiment trains an Attention with Linear Biases (ALiBi) based", "# ALiBi doesn't use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed = 'no_pos' #", "c.d_model, 
dropout_prob=c.dropout) # Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn,", "# Log the loss at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1]))", "is an annotated PyTorch experiment to train a [ALiBi model](index.html). This is based", "# Starting prompt for sampling 'prompt': 'It is ', # Use Tiny Shakespeare", "Tiny Shakespeare dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use a context size", "experiment.configs(conf, { # Use character level tokenizer 'tokenizer': 'character', # Prompt separator is", "(ALiBi)](index.html) Experiment This is an annotated PyTorch experiment to train a [ALiBi model](index.html).", "def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader with `valid_seq_len` sequence length \"\"\"", "'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses at the initial", "@option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader with `valid_seq_len` sequence length", "128, # Batch size $128$ 'batch_size': 128, # Switch between training and validation", "Linear Biases (ALiBi) Experiment summary: This experiment trains an Attention with Linear Biases", "from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import TransformerConfigs from", "change the attention mechanism. 
\"\"\" # ALiBi based transformer (defined below) transformer: TransformerConfigs", "return conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf =", "target[0])) # Log the loss at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1],", "$128$ 'valid_seq_len': 80, # Train for $32$ epochs 'epochs': 128, # Batch size", "Switch between training and validation for $10$ times # per epoch 'inner_iterations': 10,", "length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c:", "\"\"\" # If there are more tokens that the training sequence length (during", "for position wise feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't use positional embeddings", "'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set models for saving and loading experiment.add_pytorch_models({'model':", "attention mechanism. \"\"\" # ALiBi based transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi'", "_alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled", "training sequence length (during validation), if self.seq_len < output.shape[0]: # Log the loss", "validation set valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor,", "This is based on [our GPT model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from", "configs conf = Configs() # Override configurations experiment.configs(conf, { # Use character level", "separator is blank 'prompt_separator': '', # Starting prompt for sampling 'prompt': 'It is", "'batch_size': 128, # Switch between training and validation for $10$ times # per", "Experiment summary: This experiment trains an Attention with Linear Biases (ALiBi) based model", "tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) # Log the loss", "int = 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\"", "- 1], target[self.seq_len - 1])) # Log the loss at the first token", "with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer,", "initial and final tokens \"\"\" # If there are more tokens that the", "# If there are more tokens that the training sequence length (during validation),", "Shuffled validation data loader with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len),", "sizes for embeddings and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens #", "'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer configurations \"\"\" # We", "'text': 'tiny_shakespeare_no_split', # Use a context size of $128$ 'seq_len': 64, # Use", "'prompt': 'It is ', # Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare', # 'text':", "Biases (ALiBi) Experiment summary: This experiment trains an Attention with Linear Biases (ALiBi)", "import AlibiMultiHeadAttention 
from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import", "'transformer.dropout': 0.1, }) # Set models for saving and loading experiment.add_pytorch_models({'model': conf.model}) #", "\"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs):", "Configs): \"\"\" ### ALiBi based Transformer configurations \"\"\" # We use our #", "labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt", "training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) #", "activation for position wise feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't use positional", "per epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8,", "for $32$ epochs 'epochs': 128, # Batch size $128$ 'batch_size': 128, # Switch", "# Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use a", "training and validation for $10$ times # per epoch 'inner_iterations': 10, # Transformer", "= c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses GELU activation for position wise", "return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\"", "Experiment This is an annotated PyTorch experiment to train a [ALiBi model](index.html). 
This", "import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression", "validation), if self.seq_len < output.shape[0]: # Log the loss at training sequence length", "between training and validation for $10$ times # per epoch 'inner_iterations': 10, #", "$32$ epochs 'epochs': 128, # Batch size $128$ 'batch_size': 128, # Switch between", "experiment, tracker from labml.configs import option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi", "Configs() # Override configurations experiment.configs(conf, { # Use character level tokenizer 'tokenizer': 'character',", "'character', # Prompt separator is blank 'prompt_separator': '', # Starting prompt for sampling", "the initial and final tokens \"\"\" # If there are more tokens that", "PyTorch experiment to train a [ALiBi model](index.html). This is based on [our GPT", "conf.n_tgt_vocab = c.n_tokens # GPT uses GELU activation for position wise feedforward conf.ffn.activation", "_alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader with `valid_seq_len` sequence", "validation for $10$ times # per epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model':", "context size of $128$ 'seq_len': 64, # Use a context size of $128$", "target: torch.Tensor): \"\"\" Log losses at the initial and final tokens \"\"\" #", "Starting prompt for sampling 'prompt': 'It is ', # Use Tiny Shakespeare dataset", "based transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation set valid_seq_len:", "experiment to train a [ALiBi model](index.html). 
This is based on [our GPT model](../gpt/index.html).", "'tokenizer': 'character', # Prompt separator is blank 'prompt_separator': '', # Starting prompt for", "Linear Biases (ALiBi) based model on Tiny Shakespeare dataset. --- # [Attention with", "\"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html) and change the attention mechanism. \"\"\"", "Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset. --- #", "dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi", "and validation for $10$ times # per epoch 'inner_iterations': 10, # Transformer configurations", "more tokens that the training sequence length (during validation), if self.seq_len < output.shape[0]:", "shuffled_longer_valid_loader(c: Configs): \"\"\" Shuffled validation data loader with `valid_seq_len` sequence length \"\"\" return", "an annotated PyTorch experiment to train a [ALiBi model](index.html). This is based on", "a context size of $128$ 'valid_seq_len': 80, # Train for $32$ epochs 'epochs':", "at the final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\"", "final token tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an", "'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf def main(): # Create experiment experiment.create(name=\"gpt_alibi\")", "train a [ALiBi model](index.html). This is based on [our GPT model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925)", "based Transformer configurations \"\"\" # We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf", "= c.n_tokens # GPT uses GELU activation for position wise feedforward conf.ffn.activation =", "below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation set valid_seq_len: int = 128", "experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with experiment.start(): # Run training conf.run() #", "# Log the loss at training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len -", "512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set models for saving", "# Create experiment experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs() # Override configurations", "feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't use positional embeddings conf.src_embed = 'no_pos'", "embeddings and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT uses", "self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention module \"\"\" return", "Shakespeare dataset. 
--- # [Attention with Linear Biases (ALiBi)](index.html) Experiment This is an", "with Linear Biases (ALiBi)](index.html) Experiment This is an annotated PyTorch experiment to train", "the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss at the final", "# [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the vocabulary sizes for", "batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer", "the vocabulary sizes for embeddings and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab =", "self.seq_len < output.shape[0]: # Log the loss at training sequence length tracker.add(f'loss.{self.seq_len -", "= 'GPT_ALiBi' # Longer validation set valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader'", "'prompt_separator': '', # Starting prompt for sampling 'prompt': 'It is ', # Use", "calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs):", "experiment experiment.create(name=\"gpt_alibi\") # Create configs conf = Configs() # Override configurations experiment.configs(conf, {", "Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch from torch.utils.data import DataLoader from labml import experiment, tracker", "'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4,", "= 'no_pos' # Set all attention mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn", "}) # Set models for saving and loading 
experiment.add_pytorch_models({'model': conf.model}) # Start the", "our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the vocabulary sizes", "GPT uses GELU activation for position wise feedforward conf.ffn.activation = 'GELU' # ALiBi", "is blank 'prompt_separator': '', # Starting prompt for sampling 'prompt': 'It is ',", "$128$ 'seq_len': 64, # Use a context size of $128$ 'valid_seq_len': 80, #", "implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set the vocabulary sizes for embeddings and generating", "from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from", "conf.ffn.activation = 'GELU' # ALiBi doesn't use positional embeddings conf.src_embed = 'no_pos' conf.tgt_embed", "the training sequence length (during validation), if self.seq_len < output.shape[0]: # Log the", "loader with `valid_seq_len` sequence length \"\"\" return DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True)", "from labml import experiment, tracker from labml.configs import option, calculate from labml_helpers.datasets.text import", "Create an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all", "labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT", "[GPT configurations](../gpt/index.html) and change the attention mechanism. 
\"\"\" # ALiBi based transformer (defined", "# Set all attention mechanisms to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha'", "DataLoader(SequentialUnBatchedDataset(text=c.text.valid, dataset=c.text, seq_len=c.valid_seq_len), batch_size=c.batch_size, collate_fn=transpose_batch, shuffle=True) @option(Configs.transformer, 'GPT_ALiBi') def _transformer_configs(c: Configs): \"\"\" ###", "character level tokenizer 'tokenizer': 'character', # Prompt separator is blank 'prompt_separator': '', #", "model on Tiny Shakespeare dataset. --- # [Attention with Linear Biases (ALiBi)](index.html) Experiment", "Prompt separator is blank 'prompt_separator': '', # Starting prompt for sampling 'prompt': 'It", "configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) #", "option, calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import", "64, # Use a context size of $128$ 'valid_seq_len': 80, # Train for", "torch.Tensor, target: torch.Tensor): \"\"\" Log losses at the initial and final tokens \"\"\"", "= 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses at the", "loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss at", "all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha',", "losses at the initial and final tokens \"\"\" # If there are more", "# Use a context size of $128$ 'seq_len': 64, # Use a context", "1], target[self.seq_len - 1])) # Log the loss at the first token tracker.add(f'loss.0.',", "'It is ', # Use Tiny Shakespeare dataset 
'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split',", "10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout':", "a [ALiBi model](index.html). This is based on [our GPT model](../gpt/index.html). [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\"", "TransformerConfigs = 'GPT_ALiBi' # Longer validation set valid_seq_len: int = 128 valid_loader =", "tokens that the training sequence length (during validation), if self.seq_len < output.shape[0]: #", "\"\"\" --- title: Attention with Linear Biases (ALiBi) Experiment summary: This experiment trains", "ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return conf", "# Log the loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log", "wise feedforward conf.ffn.activation = 'GELU' # ALiBi doesn't use positional embeddings conf.src_embed =", "model](index.html). This is based on [our GPT model](../gpt/index.html). 
[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925) \"\"\" import torch", "- 1])) # Log the loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0]))", "- 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) # Log the loss at", "\"\"\" # ALiBi based transformer (defined below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer", "return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention mechanisms to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha',", "conf.model}) # Start the experiment with experiment.start(): # Run training conf.run() # if", "at training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1]))", "at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss at the", "Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html) and", "Log the loss at the first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the", "to ALiBi conf.encoder_attn = 'alibi_mha' conf.decoder_attn = 'alibi_mha' conf.decoder_mem_attn = 'alibi_mha' # return", "We extend [GPT configurations](../gpt/index.html) and change the attention mechanism. \"\"\" # ALiBi based", "Use Tiny Shakespeare dataset 'text': 'tiny_shakespeare', # 'text': 'tiny_shakespeare_no_split', # Use a context", "Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html) and change the attention mechanism.", "on Tiny Shakespeare dataset. 
--- # [Attention with Linear Biases (ALiBi)](index.html) Experiment This", "# per epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads':", "to ALiBi calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def", "import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch from labml_nn.transformers import", "1}.', self.loss_func(output[-1], target[-1])) def _alibi_mha(c: TransformerConfigs): \"\"\" Create an ALiBi attention module \"\"\"", "valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses at", "for embeddings and generating logits conf.n_src_vocab = c.n_tokens conf.n_tgt_vocab = c.n_tokens # GPT", "128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set models", "# Use character level tokenizer 'tokenizer': 'character', # Prompt separator is blank 'prompt_separator':", "with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset. 
--- # [Attention", "first token tracker.add(f'loss.0.', self.loss_func(output[0], target[0])) # Log the loss at the final token", "loss at training sequence length tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len -", "size $128$ 'batch_size': 128, # Switch between training and validation for $10$ times", "there are more tokens that the training sequence length (during validation), if self.seq_len", "self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1])) # Log the loss at the first", "c.n_tokens # GPT uses GELU activation for position wise feedforward conf.ffn.activation = 'GELU'", "'', # Starting prompt for sampling 'prompt': 'It is ', # Use Tiny", "set valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target:", "import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ## Configurations We extend [GPT configurations](../gpt/index.html)", "to train a [ALiBi model](index.html). This is based on [our GPT model](../gpt/index.html). [![View", "for $10$ times # per epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128,", "labml_nn.transformers import TransformerConfigs from labml_nn.transformers.gpt import Configs as GPTConfigs class Configs(GPTConfigs): \"\"\" ##", "epoch 'inner_iterations': 10, # Transformer configurations 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8, 'transformer.n_layers':", "(ALiBi) based model on Tiny Shakespeare dataset. 
--- # [Attention with Linear Biases", "prompt for sampling 'prompt': 'It is ', # Use Tiny Shakespeare dataset 'text':", "'transformer.n_heads': 8, 'transformer.n_layers': 4, 'transformer.dropout': 0.1, }) # Set models for saving and", "Transformer configurations \"\"\" # We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf =", "# Override configurations experiment.configs(conf, { # Use character level tokenizer 'tokenizer': 'character', #", "# We use our # [configurable transformer implementation](../configs.html#TransformerConfigs) conf = TransformerConfigs() # Set", "valid_seq_len: int = 128 valid_loader = 'shuffled_longer_valid_loader' def other_metrics(self, output: torch.Tensor, target: torch.Tensor):", "output: torch.Tensor, target: torch.Tensor): \"\"\" Log losses at the initial and final tokens", "calculate from labml_helpers.datasets.text import SequentialUnBatchedDataset from labml_nn.transformers.alibi import AlibiMultiHeadAttention from labml_nn.experiments.nlp_autoregression import transpose_batch", "(defined below) transformer: TransformerConfigs = 'GPT_ALiBi' # Longer validation set valid_seq_len: int =", "dataset. 
--- # [Attention with Linear Biases (ALiBi)](index.html) Experiment This is an annotated", "conf.tgt_embed = 'no_pos' # Set all attention mechanisms to ALiBi conf.encoder_attn = 'alibi_mha'", "that the training sequence length (during validation), if self.seq_len < output.shape[0]: # Log", "Create configs conf = Configs() # Override configurations experiment.configs(conf, { # Use character", "an ALiBi attention module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention", "module \"\"\" return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout) # Set all attention mechanisms to ALiBi", "'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha) calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha) @option(Configs.valid_loader) def shuffled_longer_valid_loader(c: Configs): \"\"\"", "\"\"\" import torch from torch.utils.data import DataLoader from labml import experiment, tracker from", "_transformer_configs(c: Configs): \"\"\" ### ALiBi based Transformer configurations \"\"\" # We use our", "saving and loading experiment.add_pytorch_models({'model': conf.model}) # Start the experiment with experiment.start(): # Run" ]
[ "x # self.next = None class Solution: def deleteDuplicates(self, head: ListNode) -> ListNode:", "Solution: def deleteDuplicates(self, head: ListNode) -> ListNode: self.next = head temp = self.next", "class Solution: def deleteDuplicates(self, head: ListNode) -> ListNode: self.next = head temp =", "ListNode) -> ListNode: self.next = head temp = self.next while temp and temp.next:", "示例 1: 输入: 1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode)", "1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' #", "给定一个排序链表,删除所有重复的元素,使得每个元素只出现一次。 示例 1: 输入: 1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3", "head temp = self.next while temp and temp.next: if temp.val == temp.next.val: temp.next", "deleteDuplicates(self, head: ListNode) -> ListNode: self.next = head temp = self.next while temp", "1: 输入: 1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list", "if temp.val == temp.next.val: temp.next = temp.next.next else: temp = temp.next return self.next", "self.next = head temp = self.next while temp and temp.next: if temp.val ==", "输入: 1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list '''", "# self.val = x # self.next = None class Solution: def deleteDuplicates(self, head:", "= head temp = self.next while temp and temp.next: if temp.val == temp.next.val:", "-> ListNode: self.next = head temp = self.next while temp and temp.next: if", "1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list. 
# class ListNode: #", "输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition", "2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list.", "x): # self.val = x # self.next = None class Solution: def deleteDuplicates(self,", "head: ListNode) -> ListNode: self.next = head temp = self.next while temp and", "1->2 示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for", "while temp and temp.next: if temp.val == temp.next.val: temp.next = temp.next.next else: temp", "__init__(self, x): # self.val = x # self.next = None class Solution: def", "temp = self.next while temp and temp.next: if temp.val == temp.next.val: temp.next =", "self.next while temp and temp.next: if temp.val == temp.next.val: temp.next = temp.next.next else:", "def deleteDuplicates(self, head: ListNode) -> ListNode: self.next = head temp = self.next while", "# self.next = None class Solution: def deleteDuplicates(self, head: ListNode) -> ListNode: self.next", "temp.next: if temp.val == temp.next.val: temp.next = temp.next.next else: temp = temp.next return", "''' # Definition for singly-linked list. # class ListNode: # def __init__(self, x):", "输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list. # class ListNode:", "Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val", "singly-linked list. 
# class ListNode: # def __init__(self, x): # self.val = x", "def __init__(self, x): # self.val = x # self.next = None class Solution:", "self.next = None class Solution: def deleteDuplicates(self, head: ListNode) -> ListNode: self.next =", "temp and temp.next: if temp.val == temp.next.val: temp.next = temp.next.next else: temp =", "# def __init__(self, x): # self.val = x # self.next = None class", "class ListNode: # def __init__(self, x): # self.val = x # self.next =", "ListNode: # def __init__(self, x): # self.val = x # self.next = None", "= x # self.next = None class Solution: def deleteDuplicates(self, head: ListNode) ->", "ListNode: self.next = head temp = self.next while temp and temp.next: if temp.val", "''' 给定一个排序链表,删除所有重复的元素,使得每个元素只出现一次。 示例 1: 输入: 1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3 输出:", "来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list. # class ListNode: # def", "list. # class ListNode: # def __init__(self, x): # self.val = x #", "None class Solution: def deleteDuplicates(self, head: ListNode) -> ListNode: self.next = head temp", "and temp.next: if temp.val == temp.next.val: temp.next = temp.next.next else: temp = temp.next", "self.val = x # self.next = None class Solution: def deleteDuplicates(self, head: ListNode)", "1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list. # class", "for singly-linked list. # class ListNode: # def __init__(self, x): # self.val =", "# Definition for singly-linked list. 
# class ListNode: # def __init__(self, x): #", "# class ListNode: # def __init__(self, x): # self.val = x # self.next", "= self.next while temp and temp.next: if temp.val == temp.next.val: temp.next = temp.next.next", "<gh_stars>0 ''' 给定一个排序链表,删除所有重复的元素,使得每个元素只出现一次。 示例 1: 输入: 1->1->2 输出: 1->2 示例 2: 输入: 1->1->2->3->3", "示例 2: 输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked", "链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list. # class ListNode: # def __init__(self,", "输入: 1->1->2->3->3 输出: 1->2->3 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list ''' # Definition for singly-linked list. #", "= None class Solution: def deleteDuplicates(self, head: ListNode) -> ListNode: self.next = head" ]
[ "= base_nc self.num_feats = num_feats self.reverse = reverse # assume feats from big", "k_feats=None, q_feats=None): # input: cond or prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas)", "the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu and logvar length #", "Condition + FiLM(Feat_img) -> code_cond # _ denotes out_feats of ecnoder_c is None", "feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks)", "mechanism # Decoder's head block out_nc = Encoder's tail block in_nc # self.base_nc", "real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha, beta) for fixed-point", "in bottleneck layer for alpha and beta # update: remove bn in FiLM", "use fixed 100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor", "both Condition Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc, out_nc return: [features] +", "Due to blurry edges, reduce the tail block kernel size back to 3", "X H) k -> q as Transformation returns : out : self attention", "= ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers = [] for", "== 4) N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps", "divide-by-zero. 
size = feat.size() assert (len(size) == 4) N, C = size[:2] feat_var", "self.num_up) = self.base_nc * (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2", "channel # for concating skip feats from encoder # torch.cat(feat, skip_feat) -> feat_next", "a resnet block in bottleneck layer for alpha and beta # update: remove", "feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may replace SFT module, but", "self.base_nc * nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in,", "q as Transformation returns : out : self attention value + input feature", "= VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if", "and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x) out = self.reparameterize(mu, logvar)", "feats): assert len(feats) == self.num_feats params = [] for i in range(self.num_feats): #", "= opt.num_up # Decoder feat layer num # self.neck_depth = neck_depth # FiLM", "len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats)", "# Similar to InGAN, increase kernel_size of entry block to 7 self.head_block =", "out_feats of ecnoder_c is None _, feats_cond, _, _ = self.encoder_c(cond) _, feats_cond_aug,", "skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or code_cond if skip_feats", "self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise + Prev_img -> Feat_img, code_img", "mu, logvar = self.tail_block(x) out = self.reparameterize(mu, logvar) out = out.view() else: out", "x = self.head_block(code) for i in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i],", "if self.out_feats: # Out feat before 
UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats", "DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if", "class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output", "self.use_attn: attn_layers = [] for i in range(self.num_down): # double channels after reduce", "def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return", "4) N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std", "FiLM parameters(alpha, beta) for fixed-point loss and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator,", "k : cond feature maps( B X C X W X H) q", "# 256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc", "inverse self.use_VAE = use_VAE # produce distribution for code self.use_attn = use_attn #", "else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input:", "and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats and", "as bottleneck depth # while Guided-pix2pix use fixed 100 across all feature maps", "out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self, x, k,", "of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug -", "visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict =", "interpolate feats according to in_shapes self.skip_feats = skip_feats # whether concat skip feats", "= ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) 
down_block = [] for i in range(self.num_down): #", "params: num_down, base_nc, out_nc return: [features] + [Code] \"\"\" def __init__(self, num_down, base_nc,", "# attn_maps.append(attn_map) if self.out_feats: # Out feat before UpSample/Concat and after FiLM/Attention feats.append(x)", "bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1)", "x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X", "feats_cond_aug - feats_cond_aug # cond2img in Decoder: apply FiLM alpha and beta #", "base_nc, in_nc return: Image U-Net skip connections help little. \"\"\" def __init__(self, num_up,", "out_nc self.input_FiLM = input_FiLM # whether input FiLMed factors self.out_feats = out_feats #", "in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # using input_FiLM as affine transformation", "from encoder self.use_attn = use_attn # use attention mechanism # Decoder's head block", "num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down", "or code_cond if skip_feats is not None: assert len(skip_feats) == self.num_up, \"skip feats", "detach() # update: no need for add 1 for relative feats ratio #", "= self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat after DownSample", "+ [Code] \"\"\" def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False,", "2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self,", "in range(num_feats): # nc_factor nc_factor = 2 ** (num_feats - i) if reverse:", "logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) 
else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None,", "* height).permute(0, 2, 1) # B X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize,", "# Encoder feat layer num self.num_up = opt.num_up # Decoder feat layer num", "fixed 100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor //", "FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x) out = self.reparameterize(mu, logvar) out", "{\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] =", "out = self.tail_block(x) return out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for", "= skip_feats # whether concat skip feats from encoder self.use_attn = use_attn #", "torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = self.gamma", "for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug #", "if self.out_shapes: # Output feature shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if", "# attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if self.in_shapes: #", "blurry edges, reduce the tail block kernel size back to 3 self.tail_block =", "seperates the scaling and shiftting, just keep the original naming \"\"\" def __init__(self,", "assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert", "nc_factor = 2 ** (self.num_up - i) if skip_feats: # double UpConv input", "code_cond # _ denotes out_feats of ecnoder_c is None _, feats_cond, _, _", "cond_aug rel_feats_ratio = [] # use for alpha(multiplier of FiLM) for f_c, f_c_a", "# Decoder's head block out_nc = Encoder's tail block in_nc # self.base_nc *", "FiLM alpha and beta # 
Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds", "visualization alpha_beta_list = [] # for fixed-point loss in zero-reconstruction for fr, fd,", "aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha, beta) for fixed-point loss and visualization", "\"k_feats and q_feats mismatch\" feats, shapes, attn_maps = None, None, None if self.out_feats:", "40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest') out", "if self.use_VAE: self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc", "\"\"\" inputs : x : input feature maps( B X C X W", "2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc", "<reponame>ddlee-cn/SemIA from torch.autograd import Variable as Vb from semia.network import * class Encoder(BaseNetwork):", "= self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) #", "assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert", "nc_factor, base_nc * nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth):", "whether interpolate feats according to in_shapes self.skip_feats = skip_feats # whether concat skip", "self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x = self.up_block[i](x)", "X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) # B", "return alpha * ((x - mean) / std) + beta def forward(self, code,", "after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) #", "block out_nc = Encoder's tail block in_nc # self.base_nc * (2 ** self.num_up)", "32 
up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc", "self.double = double # whether the rel_feats are concated, instead of diff/ratio bottlenecks", "-> feat_next # 256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor *", "skip_feats is not None: assert len(skip_feats) == self.num_up, \"skip feats number mismatch\" if", "super().__init__() self.num_up = num_up self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc", "# feats_cond_aug / feats_cond rel_feats_diff = [] # use for beta(bias of FiLM)", "= torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc * 2,", "self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down),", "assert (len(size) == 4) N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2)", "out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature from", "= opt.base_nc # base channel size for conv layers self.cond_nc = opt.cond_nc #", "== len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) ==", "int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor //", "forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition + FiLM(Feat_img) -> code_cond", "feats number mismatch\" if self.in_shapes: if in_shapes is not None: assert len(in_shapes) ==", "for code self.use_attn = use_attn # use attention mechanism if self.use_VAE: self.vae_tail_fc_in =", "= out_feats # output feats self.out_shapes = out_shapes # output feats shape for", "** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, 
feat,", "denotes out_feats of decoder_i is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\":", "and cond_aug rel_feats_ratio = [] # use for alpha(multiplier of FiLM) for f_c,", "= Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha", "feat, eps=1e-5): # eps is a small value added to the variance to", "attn_maps.append(attn_map) if self.out_feats: # Out feat after DownSample and FiLM/Attention feats.append(x) if self.use_VAE:", "** (i + 1) # use base_nc * nc_factor // 2 as bottleneck", "= self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x =", "None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds,", "1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\":", "** self.num_up)) up_block = [] for i in range(self.num_up): nc_factor = 2 **", "\"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] =", "nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size of tail block of decoder to", "def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or prev_img", "if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x,", "FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or code_cond if skip_feats is not None:", "FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # 
attn_maps.append(attn_map) x", "opt.num_up # Decoder feat layer num # self.neck_depth = neck_depth # FiLM layer", "self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\" feats, shapes,", "input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc = base_nc self.in_nc", "alpha * ((x - mean) / std) + beta def reparameterize(self, mu, logvar):", "# attn_maps = [] if noise is not None: input = torch.cat((input, noise),", "__init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder feat layer num self.num_up", "DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x) out = self.reparameterize(mu,", "layer for alpha and beta # update: remove bn in FiLM module block_list", "from encoder # torch.cat(feat, skip_feat) -> feat_next # 256 -> 64, 128 ->", "more memory and brings a lot computational burden cond_feats as Key aug_cond_feats as", "== self.num_up, \"skip feats number mismatch\" if self.in_shapes: if in_shapes is not None:", "out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc", "mismatch\" feats, shapes, attn_maps = None, None, None if self.out_feats: feats = []", "burden cond_feats as Key aug_cond_feats as Query image_feats as Value \"\"\" def __init__(self,", "attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x", "is not None: assert len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\" else: raise", "- i - 1], mode='nearest') out = self.tail_block(x) return out, feats class Cond_Attn(nn.Module):", "N out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height)", "kernel_size=1) self.key_conv = 
nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)", "# use attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest)", "alpha to around 0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1,", "-> code_cond # _ denotes out_feats of ecnoder_c is None _, feats_cond, _,", "= self.head_block(input) for i in range(self.num_down): if self.out_shapes: # Output feature shape before", "self.out_feats: feats = [] if self.out_shapes: shapes = [] # do not store", "nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self, x,", "= self.encoder_c(cond) _, feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: #", "# use attention mechanism # Decoder's head block out_nc = Encoder's tail block", "for seg self.im_nc = opt.im_nc # Image channel size, commonly 3 self.opt =", "return: Image U-Net skip connections help little. 
\"\"\" def __init__(self, num_up, base_nc, in_nc,", "# attach FiLM source features to main graph instead detach() # update: no", "assume feats from big to small(more ncs) self.double = double # whether the", "reverse=False): super().__init__() self.base_nc = base_nc self.num_feats = num_feats self.reverse = reverse # assume", "num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats = num_feats self.reverse = reverse", "layers self.cond_nc = opt.cond_nc # Condition channel size, 3 for seg self.im_nc =", "** self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc,", "width * height).permute(0, 2, 1) # B X (W*H) X C proj_key =", "feats_cond_aug / feats_cond rel_feats_diff = [] # use for beta(bias of FiLM) for", "eps=1e-5): # eps is a small value added to the variance to avoid", "or self.opt.D_use_FiLM: # Relative feats between cond and cond_aug rel_feats_ratio = [] #", "rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff = []", "connections help little. 
\"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False,", "H) q : aug cond feature maps( B X C X W X", "nc_factor = 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers)", "& beta separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img),", "feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between", "+= [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main =", "rel_feats_list = [] # for visualization alpha_beta_list = [] # for fixed-point loss", "in zero-reconstruction for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): #", "64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor //", "in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond", "Encoder for both Condition Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc, out_nc return:", "self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma", "len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps = None, None if", "X C proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) # B X C", "to InGAN, increase kernel_size of entry block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc,", "2 ** (num_feats - i) if reverse: nc_factor = 2 ** (i +", "__init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, 
skip_feats=False, use_attn=False): super().__init__() self.num_up =", "raise ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up,", "feat_next # 256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2,", "f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug # cond2img in", "self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N out =", "self.softmax = nn.Softmax(dim=-1) # def forward(self, x, k, q): \"\"\" inputs : x", "feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature from encoder", "use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc = base_nc self.in_nc = in_nc", "+ 1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff = [] #", "= input_FiLM # using input_FiLM as affine transformation self.out_feats = out_feats # output", "feats shape for inverse self.use_VAE = use_VAE # produce distribution for code self.use_attn", "x : input feature maps( B X C X W X H) k", "# whether output decoder features self.in_shapes = in_shapes # whether interpolate feats according", "B X C x (*W*H) energy = torch.bmm(proj_query, proj_key) # transpose check attention", "rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\":", "self.cond_nc = opt.cond_nc # Condition channel size, 3 for seg self.im_nc = opt.im_nc", "a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\":", "self.use_attn: # attn_maps = [] if noise is not None: input = torch.cat((input,", "return alpha * ((x - mean) / std) + beta def reparameterize(self, mu,", "# cond2img in Decoder: apply FiLM alpha and beta # Feat_cond -> alpha,", "self.base_nc = base_nc self.num_feats = num_feats 
self.reverse = reverse # assume feats from", "# the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu and logvar length", "# whether interpolate feats according to in_shapes self.skip_feats = skip_feats # whether concat", "-> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor", "feat before UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats is not None: #", "feats.append(x) if skip_feats is not None: # merge skip feats before UpSample skip_feat", "== self.num_feats params = [] for i in range(self.num_feats): # attach FiLM source", "out_feats of decoder_i is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None,", "layer bottleneck depth self.base_nc = opt.base_nc # base channel size for conv layers", "None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if", "skip connections help little. 
\"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False,", "X C X N out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize,", "None: assert len(skip_feats) == self.num_up, \"skip feats number mismatch\" if self.in_shapes: if in_shapes", "if skip_feats: # double UpConv input channel, and half output channel # for", "Feature Translation layer) Our version seperates the scaling and shiftting, just keep the", "code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc = base_nc self.in_nc = in_nc self.out_nc", "i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if", "\"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False): super().__init__()", "# Relative feats between cond and cond_aug rel_feats_ratio = [] # use for", "and brings a lot computational burden cond_feats as Key aug_cond_feats as Query image_feats", "for i in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn:", "to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers = []", "C X N out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C,", "concated, instead of diff/ratio bottlenecks = [] for i in range(num_feats): # nc_factor", "128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor // 2)))", "concat skip feats from encoder self.use_attn = use_attn # use attention mechanism #", "version seperates the scaling and shiftting, just keep the original naming \"\"\" def", "feat_mean, feat_std def affine_transformation(self, X, alpha, beta): x = X.clone() mean, std =", "self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = 
ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc)", "[] for i in range(self.num_up): nc_factor = 2 ** (self.num_up - i) if", "Module Attetion module may replace SFT module, but takes much more memory and", "D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse()", "self.in_shapes: # interpolate feature size after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64,", "eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C,", "decoder to 7 # Due to blurry edges, reduce the tail block kernel", "[] for i in range(self.num_up): # double channels after reduce spatial size nc_factor", "-> q as Transformation returns : out : self attention value + input", "10]) torch.Size([6, 10]) # torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) # torch.Size([1, 16,", "shift rel_feats_ratio, alpha to around 0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone()", "N (N is Width*Height) \"\"\" m_batchsize, C, width, height = x.size() proj_query =", "alpha_beta_list = [] # for fixed-point loss in zero-reconstruction for fr, fd, a,", "torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) # torch.Size([1, 16, 24, 40]) torch.Size([25, 40])", "just keep the original naming \"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__()", "big to small(more ncs) self.double = double # whether the rel_feats are concated,", "to main graph instead detach() # update: no need for add 1 for", "[] for i in range(num_feats): # nc_factor nc_factor = 2 ** (num_feats -", "self.softmax(energy) # BX (N) X (N) every pixel has W*H scores proj_value =", ": x : input feature maps( B X C X W X H)", "H) k -> q as Transformation returns : out : self attention value", "added to the variance to avoid divide-by-zero. 
size = feat.size() assert (len(size) ==", "use for alpha(multiplier of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a +", "= None, None, None if self.out_feats: feats = [] if self.out_shapes: shapes =", "mean = mean.expand_as(x) std = std.expand_as(x) return alpha * ((x - mean) /", "- mean) / std) + beta def reparameterize(self, mu, logvar): if self.training: std", "noise + z_prev instead of torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc,", "= out_nc self.input_FiLM = input_FiLM # using input_FiLM as affine transformation self.out_feats =", "nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps is a small value added to", "((x - mean) / std) + beta def reparameterize(self, mu, logvar): if self.training:", "= use_attn # use attention mechanism # Decoder's head block out_nc = Encoder's", "= alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise +", "[Code] \"\"\" def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False,", "cond2img in Decoder: apply FiLM alpha and beta # Feat_cond -> alpha, beta", "1 for relative feats ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params", "feats = [] # if self.use_attn: # attn_maps = [] x = self.head_block(code)", "attn_maps = None, None if self.out_feats: feats = [] # if self.use_attn: #", "= nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self,", "= [] # add a resnet block in bottleneck layer for alpha and", "E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise + Prev_img -> Feat_img, code_img code_i, feats_img,", "mu and logvar length # Similar to InGAN, increase kernel_size of entry block", "\"k_feats\": 
None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds", "out_feats # whether output decoder features self.in_shapes = in_shapes # whether interpolate feats", "import Variable as Vb from semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder for", "\"\"\" FiLMModule(Semantic Feature Translation layer) Our version seperates the scaling and shiftting, just", "out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc = base_nc", "affine transformation self.out_feats = out_feats # output feats self.out_shapes = out_shapes # output", "* (2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers = [] for i in", "# use FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down),", "7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block = [] for i in", "for i in range(self.num_down): # double channels after reduce spatial size nc_factor =", "* class Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation map) and Image(+Noise) params:", "# also apply FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if", "shiftting, just keep the original naming \"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False):", "\"k_feats and q_feats mismatch\" feats, attn_maps = None, None if self.out_feats: feats =", "self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i],", "# transpose check attention = self.softmax(energy) # BX (N) X (N) every pixel", "# Condition + FiLM(Feat_img) -> code_cond # _ denotes out_feats of ecnoder_c is", "block in bottleneck layer for alpha and beta # update: remove bn in", "self.opt.D_use_skip: 
feats_img = None # code_img + FiLM(Feat_cond) -> Fake_img # _ denotes", "also output FiLM parameters(alpha, beta) for fixed-point loss and visualization \"\"\" def __init__(self,", "Transformation returns : out : self attention value + input feature attention: B", "Encoder feat layer num self.num_up = opt.num_up # Decoder feat layer num #", "x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat", "concating skip feats from encoder # torch.cat(feat, skip_feat) -> feat_next # 256 ->", "Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature from encoder parmas: num_up, base_nc, in_nc", "E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise + Prev_img -> Feat_img, code_img code_i,", "feats_cond, _, _ = self.encoder_c(cond) _, feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM", "output channel # for concating skip feats from encoder # torch.cat(feat, skip_feat) ->", "self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map =", "2, int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor", "output: fake_image(tgt_img) also output FiLM parameters(alpha, beta) for fixed-point loss and visualization \"\"\"", "rel_feats are concated, instead of diff/ratio bottlenecks = [] for i in range(num_feats):", "self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def", "_, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between cond", "FiLM layer bottleneck depth self.base_nc = opt.base_nc # base channel size for conv", "rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug # cond2img in Decoder: apply FiLM alpha", 
"D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond,", "attn_maps = [] if noise is not None: input = torch.cat((input, noise), 1)", "= nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1)", "# use for beta(bias of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a,", "None if self.out_feats: feats = [] if self.out_shapes: shapes = [] # do", "attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out = self.gamma *", "skip_feats # whether concat skip feats from encoder self.use_attn = use_attn # use", "256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc *", "# for visualization alpha_beta_list = [] # for fixed-point loss in zero-reconstruction for", "ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers = [] for i in range(self.num_up):", "len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats)", "len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\" else: raise ValueError(\"in_shapes not in Input\")", "self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise + z_prev instead", "== self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps = None, None if self.out_feats:", "\"\"\" Decoder for Image input: feature from encoder parmas: num_up, base_nc, in_nc return:", "FiLM/Attention feats.append(x) if skip_feats is not None: # merge skip feats before UpSample", "from torch.autograd import Variable as Vb from semia.network import * class Encoder(BaseNetwork): \"\"\"", "= 
self.tail_block(x) return out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image", "in range(self.num_down): # double channels after reduce spatial size nc_factor = 2 **", "20]) # torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up -", "attention = self.softmax(energy) # BX (N) X (N) every pixel has W*H scores", "# B X C x (*W*H) energy = torch.bmm(proj_query, proj_key) # transpose check", "input: cond or prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas", "[] x = self.head_block(code) for i in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x,", "skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i],", "// 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block =", "# B X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1, width * height)", "b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to around 0", "every pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) #", "naming \"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats", "before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i])", "else: self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers =", "feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean =", "[] # for fixed-point loss in zero-reconstruction for fr, fd, a, b in", "if self.in_shapes: # interpolate feature size after UpSample # print(x.shape, 
in_shapes[self.num_up-i-1]) # torch.Size([1,", "in Decoder: apply FiLM alpha and beta # Feat_cond -> alpha, beta alpha_conds", "attention: B X N X N (N is Width*Height) \"\"\" m_batchsize, C, width,", "= double # whether the rel_feats are concated, instead of diff/ratio bottlenecks =", "FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats:", "nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block", "self.reverse = reverse # assume feats from big to small(more ncs) self.double =", "width, height) out = self.gamma * out + x return out, attention class", "not store attn_maps # if self.use_attn: # attn_maps = [] if noise is", "use attention mechanism # Decoder's head block out_nc = Encoder's tail block in_nc", "of ecnoder_c is None _, feats_cond, _, _ = self.encoder_c(cond) _, feats_cond_aug, _,", "no need for add 1 for relative feats ratio # alpha & beta", "prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\"", "(num_feats - i) if reverse: nc_factor = 2 ** (i + 1) #", "= opt # use FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2", "self.input_FiLM = input_FiLM # using input_FiLM as affine transformation self.out_feats = out_feats #", "self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers = [] for i in range(self.num_up): #", "= [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else: block_list = [] # add", "feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1,", "None, None if self.out_feats: feats = [] # if self.use_attn: # attn_maps =", "100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2))", "opt # use FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2 **", "inputs : x : input feature maps( B X C X W 
X", "\"\"\" m_batchsize, C, width, height = x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width *", "feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu and logvar length # Similar to", "are concated, instead of diff/ratio bottlenecks = [] for i in range(num_feats): #", "or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def", "i in range(self.num_up): nc_factor = 2 ** (self.num_up - i) if skip_feats: #", "if self.in_shapes: if in_shapes is not None: assert len(in_shapes) == self.num_up, \"in_shapes number", "= self.key_conv(k).view(m_batchsize, -1, width * height) # B X C x (*W*H) energy", "as Vb from semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder for both Condition", "self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition +", "nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else:", "2, 1)) out = out.view(m_batchsize, C, width, height) out = self.gamma * out", "Relative feats between cond and cond_aug rel_feats_ratio = [] # use for alpha(multiplier", "mode='nearest') out = self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion", "fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None,", "Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or", "for concating skip feats from encoder # torch.cat(feat, skip_feat) -> feat_next # 256", "block_list += [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, 
kernel_size=1))] main", "Decoder feat layer num # self.neck_depth = neck_depth # FiLM layer bottleneck depth", "Image U-Net skip connections help little. \"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc,", "and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats and", "32, 12, 20]) torch.Size([12, 20]) # torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x", "in Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas", "nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps is a small", "x = F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest') out = self.tail_block(x) return", "width * height) # B X C x (*W*H) energy = torch.bmm(proj_query, proj_key)", "= in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # using input_FiLM as affine", "params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map =", "out_feats=True) # use noise + z_prev instead of torch.cat(noise+prev, prev) as input self.encoder_i", "# merge skip feats before UpSample skip_feat = skip_feats[self.num_up - i - 1]", "# eps is a small value added to the variance to avoid divide-by-zero.", "beta def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_())", "for add 1 for relative feats ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i]))", "beta # Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list", "self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def", "params = [] for i in range(self.num_feats): # attach FiLM source features to", "# Condition 
channel size, 3 for seg self.im_nc = opt.im_nc # Image channel", "return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may replace SFT", "beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img", "use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if", "base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc", "= [] # if self.use_attn: # attn_maps = [] x = self.head_block(code) for", "if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: #", "base_nc * nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if", "self.tail_block(x) return out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image input:", "fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img, rel_feats_list,", "size = feat.size() assert (len(size) == 4) N, C = size[:2] feat_var =", "_ denotes out_feats of decoder_i is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None,", "attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc", "[] # if self.use_attn: # attn_maps = [] x = self.head_block(code) for i", "None if self.out_feats: feats = [] # if self.use_attn: # attn_maps = []", "reduce spatial size nc_factor = 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor))", "for i in range(self.num_up): # double channels after reduce spatial size nc_factor =", 
"not None: assert len(skip_feats) == self.num_up, \"skip feats number mismatch\" if self.in_shapes: if", "{\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"],", "Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc,", "feats before UpSample skip_feat = skip_feats[self.num_up - i - 1] if self.input_FiLM: #", "range(self.num_feats): # attach FiLM source features to main graph instead detach() # update:", "self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x) return alpha * ((x - mean)", "self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization alpha_beta_list = []", "None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] =", "- mean) / std) + beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None,", "# use noise + z_prev instead of torch.cat(noise+prev, prev) as input self.encoder_i =", "= 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def", "= 2 ** (num_feats - i) if reverse: nc_factor = 2 ** (i", "(self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5):", "= [] # do not store attn_maps # if self.use_attn: # attn_maps =", "if in_shapes is not None: assert len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\"", "mechanism if self.use_VAE: self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc =", 
"* 2, nc, kernel=1, pad=0)] else: block_list = [] # add a resnet", "self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv =", "self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"],", "FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if", "feature shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x,", "self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self, x, k, q): \"\"\"", "* (2 ** self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc,", "for conv layers self.cond_nc = opt.cond_nc # Condition channel size, 3 for seg", "or prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas", "= logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input,", "number mismatch\" if self.in_shapes: if in_shapes is not None: assert len(in_shapes) == self.num_up,", "lot computational burden cond_feats as Key aug_cond_feats as Query image_feats as Value \"\"\"", "= [] for i in range(self.num_down): # double channels after reduce spatial size", "std) + beta def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps", "beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization alpha_beta_list = [] #", "as Key aug_cond_feats as Query image_feats as Value \"\"\" def __init__(self, in_dim, 
bottleneck_factor=32):", "k, q): \"\"\" inputs : x : input feature maps( B X C", "self.out_nc = out_nc self.input_FiLM = input_FiLM # whether input FiLMed factors self.out_feats =", "skip_feats: # double UpConv input channel, and half output channel # for concating", "for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) #", "2, nc, kernel=1, pad=0)] else: block_list = [] # add a resnet block", "feature from encoder parmas: num_up, base_nc, in_nc return: Image U-Net skip connections help", "FiLMed factors self.out_feats = out_feats # whether output decoder features self.in_shapes = in_shapes", "image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha, beta) for fixed-point loss", "(N is Width*Height) \"\"\" m_batchsize, C, width, height = x.size() proj_query = self.query_conv(q).view(m_batchsize,", "= feat.size() assert (len(size) == 4) N, C = size[:2] feat_var = feat.view(N,", "of decoder_i is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\":", "proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) # B X C x (*W*H)", "self.out_nc) if self.use_attn: attn_layers = [] for i in range(self.num_down): # double channels", "nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return main def forward(self, feats): assert", "x, cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition + FiLM(Feat_img) -> code_cond #", "1] if self.input_FiLM: # also apply FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat,", "module, but takes much more memory and brings a lot computational burden cond_feats", "nc_factor // 2 as bottleneck depth # while Guided-pix2pix use fixed 100 across", "kernel=3, pad=1) if self.use_attn: attn_layers = [] for i in range(self.num_up): # double", "self.attn_layers[i](x, k_feats[i], 
q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat before UpSample/Concat and", "self.neck_depth = neck_depth # FiLM layer bottleneck depth self.base_nc = opt.base_nc # base", "input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i =", "after reduce spatial size nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc", "height) # B X C X N out = torch.bmm(proj_value, attention.permute(0, 2, 1))", "feat after DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x) out", "feature maps( B X C X W X H) q : aug cond", "cond and cond_aug rel_feats_ratio = [] # use for alpha(multiplier of FiLM) for", "attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat before", "# double channels after reduce spatial size nc_factor = 2 ** (self.num_up -", "in_shapes self.skip_feats = skip_feats # whether concat skip feats from encoder self.use_attn =", "q_feats mismatch\" feats, attn_maps = None, None if self.out_feats: feats = [] #", "40]) x = F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest') out = self.tail_block(x)", "FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map", "_ = self.encoder_c(cond) _, feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM:", "self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape)", "alpha and beta # Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds =", "double # whether the rel_feats are concated, instead of diff/ratio bottlenecks = []", "base_nc, out_nc return: [features] + [Code] \"\"\" 
def __init__(self, num_down, base_nc, in_nc, out_nc,", "[] # for visualization alpha_beta_list = [] # for fixed-point loss in zero-reconstruction", "_, _ = self.encoder_c(cond) _, feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or", "and q_feats mismatch\" feats, attn_maps = None, None if self.out_feats: feats = []", "= self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map", "between cond and cond_aug rel_feats_ratio = [] # use for alpha(multiplier of FiLM)", "self.up_block = nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size of tail block of", "# shift rel_feats_ratio, alpha to around 0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(),", "increase kernel_size of tail block of decoder to 7 # Due to blurry", "base channel size for conv layers self.cond_nc = opt.cond_nc # Condition channel size,", "skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc = base_nc self.in_nc = in_nc self.out_nc", "= self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N out", "self).__init__() self.num_down = opt.num_down # Encoder feat layer num self.num_up = opt.num_up #", "= {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse()", "E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"],", "shapes = [] # do not store attn_maps # if self.use_attn: # attn_maps", "beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization", "def affine_transformation(self, X, alpha, beta): x = X.clone() mean, std = self.calc_mean_std(x) mean", "kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) 
self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) #", "of entry block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block =", "self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map)", "may replace SFT module, but takes much more memory and brings a lot", "b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None}", "k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat after DownSample and FiLM/Attention", "# double channels after reduce spatial size nc_factor = 2 ** (i +", "in_shapes=False, skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc = base_nc self.in_nc = in_nc", "2 ** (i + 1) # use base_nc * nc_factor // 2 as", "self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc,", "x = self.head_block(input) for i in range(self.num_down): if self.out_shapes: # Output feature shape", "pad=3) down_block = [] for i in range(self.num_down): # double channels after reduce", "reduce the tail block kernel size back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc,", "feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x) out = self.reparameterize(mu, logvar) out =", "torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if self.in_shapes: # interpolate feature size after", "= nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps is a small value added", "self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # whether input FiLMed", "self.num_up = num_up self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM", 
"in_nc return: Image U-Net skip connections help little. \"\"\" def __init__(self, num_up, base_nc,", "= reverse # assume feats from big to small(more ncs) self.double = double", "std = std.expand_as(x) return alpha * ((x - mean) / std) + beta", "* nc_factor // 2 as bottleneck depth # while Guided-pix2pix use fixed 100", "self.num_feats = num_feats self.reverse = reverse # assume feats from big to small(more", "C, 1, 1) return feat_mean, feat_std def affine_transformation(self, X, alpha, beta): x =", "cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition + FiLM(Feat_img) -> code_cond # _", "and half output channel # for concating skip feats from encoder # torch.cat(feat,", "self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img, rel_feats_list, alpha_beta_list else: return", "self.out_feats: # Out feat after DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar", "mean.expand_as(x) std = std.expand_as(x) return alpha * ((x - mean) / std) +", "input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn)", "noise), 1) x = self.head_block(input) for i in range(self.num_down): if self.out_shapes: # Output", "C x (*W*H) energy = torch.bmm(proj_query, proj_key) # transpose check attention = self.softmax(energy)", "3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers = [] for", "seg self.im_nc = opt.im_nc # Image channel size, commonly 3 self.opt = opt", "self.out_nc = out_nc self.input_FiLM = input_FiLM # using input_FiLM as affine transformation self.out_feats", "image_feats as Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim", "7 # Due to 
blurry edges, reduce the tail block kernel size back", "feats self.out_shapes = out_shapes # output feats shape for inverse self.use_VAE = use_VAE", "# B X C X N out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out", "beta): x = X.clone() mean, std = self.calc_mean_std(x) mean = mean.expand_as(x) std =", "self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim,", "use_VAE # produce distribution for code self.use_attn = use_attn # use attention mechanism", "torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up - i -", "size, commonly 3 self.opt = opt # use FiLM or Cond-Attn code_c_nc, code_i_nc", "= out.view() else: out = self.tail_block(x) return out, feats, shapes, attn_maps class Decoder(BaseNetwork):", "else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block = nn.ModuleList(up_block) #", "encoder self.use_attn = use_attn # use attention mechanism # Decoder's head block out_nc", "_, attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img = None # code_img", "16, 24, 40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up - i - 1],", "+ input feature attention: B X N X N (N is Width*Height) \"\"\"", "code: code_img or code_cond if skip_feats is not None: assert len(skip_feats) == self.num_up,", "Variable as Vb from semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder for both", "factors self.out_feats = out_feats # whether output decoder features self.in_shapes = in_shapes #", "10]) # torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) # torch.Size([1, 16, 24, 40])", "* nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double:", "B X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) #", 
"bn in FiLM module block_list += [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True),", "((x - mean) / std) + beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None,", "self.base_nc * (2 ** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) #", "mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\"", "source features to main graph instead detach() # update: no need for add", "super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder feat layer num self.num_up = opt.num_up", "SFT module, but takes much more memory and brings a lot computational burden", "block kernel size back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if", "= self.base_nc * (2 ** self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c =", "if skip_feats is not None: assert len(skip_feats) == self.num_up, \"skip feats number mismatch\"", "eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): #", "if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc * (2 **", "input_FiLM # using input_FiLM as affine transformation self.out_feats = out_feats # output feats", "# _ denotes out_feats of decoder_i is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\":", "zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff", "self.in_shapes = in_shapes # whether interpolate feats according to in_shapes self.skip_feats = skip_feats", "range(num_feats): # nc_factor nc_factor = 2 ** (num_feats - i) if reverse: nc_factor", "Fake_img # _ denotes 
out_feats of decoder_i is None D_param_dict = {\"FiLM_alphas\": None,", "in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) # torch.Size([1, 32, 12, 20])", "= code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu and", "and logvar length # Similar to InGAN, increase kernel_size of entry block to", "original naming \"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc", "check attention = self.softmax(energy) # BX (N) X (N) every pixel has W*H", "use_attn # use attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in # the flattened", "# Similar to InGAN, increase kernel_size of tail block of decoder to 7", "Image input: feature from encoder parmas: num_up, base_nc, in_nc return: Image U-Net skip", "# whether the rel_feats are concated, instead of diff/ratio bottlenecks = [] for", "output FiLM parameters(alpha, beta) for fixed-point loss and visualization \"\"\" def __init__(self, opt):", "self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down,", "input feature attention: B X N X N (N is Width*Height) \"\"\" m_batchsize,", "* nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2)))", "self.base_nc * (2 ** self.num_up) = self.base_nc * (2 ** self.num_down) self.head_block =", "- feats_cond_aug # cond2img in Decoder: apply FiLM alpha and beta # Feat_cond", "= self.up_block[i](x) if self.in_shapes: # interpolate feature size after UpSample # print(x.shape, in_shapes[self.num_up-i-1])", "** self.num_up) = self.base_nc * (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc *", "None, None, None if self.out_feats: feats = [] if self.out_shapes: shapes = []", "* ((x - mean) / std) + beta def forward(self, code, skip_feats=None, in_shapes=None,", "+ FiLM(Feat_cond) -> Fake_img # _ denotes 
out_feats of decoder_i is None D_param_dict", "forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or prev_img if", "FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our version seperates the scaling and shiftting,", "produce distribution for code self.use_attn = use_attn # use attention mechanism if self.use_VAE:", "maps( B X C X W X H) k : cond feature maps(", "\"skip feats number mismatch\" if self.in_shapes: if in_shapes is not None: assert len(in_shapes)", "Decoder's head block out_nc = Encoder's tail block in_nc # self.base_nc * (2", "= base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # whether", "double channels after reduce spatial size nc_factor = 2 ** (i + 1)", "-1, width * height) # B X C X N out = torch.bmm(proj_value,", "Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha =", "# update: remove bn in FiLM module block_list += [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc,", "nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block = nn.ModuleList(up_block) # Similar to InGAN,", "self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True)", "self.tail_block(x) out = self.reparameterize(mu, logvar) out = out.view() else: out = self.tail_block(x) return", "ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block = [] for i in range(self.num_up):", "apply FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat,", "width * height) # B X C X N out = torch.bmm(proj_value, attention.permute(0,", "computational burden cond_feats as Key 
aug_cond_feats as Query image_feats as Value \"\"\" def", "= self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X (W*H) X", "feat layer num # self.neck_depth = neck_depth # FiLM layer bottleneck depth self.base_nc", "3 for seg self.im_nc = opt.im_nc # Image channel size, commonly 3 self.opt", "attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our version seperates the scaling", "code_fc=None): super().__init__() self.num_down = num_down self.base_nc = base_nc self.in_nc = in_nc self.out_nc =", "bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)]", "/ feats_cond rel_feats_diff = [] # use for beta(bias of FiLM) for f_c,", "= F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest') out = self.tail_block(x) return out,", "= base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # using", "== self.num_down, \"k_feats and q_feats mismatch\" feats, shapes, attn_maps = None, None, None", "back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers =", "self.use_VAE: self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc #", "= ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers = [] for i in", "code_cond if skip_feats is not None: assert len(skip_feats) == self.num_up, \"skip feats number", "= [] if self.out_shapes: shapes = [] # do not store attn_maps #", "noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or prev_img if self.input_FiLM: assert", "C, width, height) out = self.gamma * out + x return out, attention", "# add a resnet block in bottleneck layer for alpha and beta #", "self.input_FiLM: # also apply FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i])", "the original 
naming \"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc =", "= X.clone() mean, std = self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x) return", "= 2 ** (i + 1) # use base_nc * nc_factor // 2", "// bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim,", "for both Condition Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc, out_nc return: [features]", "feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug # cond2img in Decoder: apply FiLM", "if self.use_attn: attn_layers = [] for i in range(self.num_down): # double channels after", "use_attn=False): super().__init__() self.num_up = num_up self.base_nc = base_nc self.in_nc = in_nc self.out_nc =", "len(k_feats) == len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps = None,", "bottleneck depth self.base_nc = opt.base_nc # base channel size for conv layers self.cond_nc", "= opt.num_down # Encoder feat layer num self.num_up = opt.num_up # Decoder feat", "self.base_nc * (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up))", "C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def affine_transformation(self, X, alpha, beta):", "feats_cond rel_feats_diff = [] # use for beta(bias of FiLM) for f_c, f_c_a", "C X W X H) q : aug cond feature maps( B X", "in range(self.num_feats): # attach FiLM source features to main graph instead detach() #", "= None, None if self.out_feats: feats = [] # if self.use_attn: # attn_maps", "FiLM module block_list += [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc,", "assert len(skip_feats) == self.num_up, \"skip 
feats number mismatch\" if self.in_shapes: if in_shapes is", "up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block = nn.ModuleList(up_block) # Similar", "height).permute(0, 2, 1) # B X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1,", "a lot computational burden cond_feats as Key aug_cond_feats as Query image_feats as Value", "shape for inverse self.use_VAE = use_VAE # produce distribution for code self.use_attn =", "= Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None,", "map) and Image(+Noise) params: num_down, base_nc, out_nc return: [features] + [Code] \"\"\" def", "self.out_shapes: # Output feature shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM:", "shapes, attn_maps = None, None, None if self.out_feats: feats = [] if self.out_shapes:", "\"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim,", "flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu and logvar length # Similar", "attn_layers = [] for i in range(self.num_up): # double channels after reduce spatial", "from big to small(more ncs) self.double = double # whether the rel_feats are", "return main def forward(self, feats): assert len(feats) == self.num_feats params = [] for", "code_c_nc, out_feats=True) # use noise + z_prev instead of torch.cat(noise+prev, prev) as input", "FiLM(Feat_img) -> code_cond # _ denotes out_feats of ecnoder_c is None _, feats_cond,", "torch.Size([6, 10]) # torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) # torch.Size([1, 16, 24,", "out = out.view(m_batchsize, C, width, height) out = self.gamma * out + x", "= 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2)) 
self.down_block", "- i - 1] if self.input_FiLM: # also apply FiLM params on skip_feats", "loss and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down #", "= self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) #", "kernel=1, pad=0)] else: block_list = [] # add a resnet block in bottleneck", "brings a lot computational burden cond_feats as Key aug_cond_feats as Query image_feats as", "beta) for fixed-point loss and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down", "= Encoder's tail block in_nc # self.base_nc * (2 ** self.num_up) = self.base_nc", "self.skip_feats = skip_feats # whether concat skip feats from encoder self.use_attn = use_attn", "and beta # update: remove bn in FiLM module block_list += [ResnetBlock(nc, use_bn=False,", "** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat,", "if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if", "self.opt.D_use_FiLM: # Relative feats between cond and cond_aug rel_feats_ratio = [] # use", "in_shapes # whether interpolate feats according to in_shapes self.skip_feats = skip_feats # whether", "# self.base_nc * (2 ** self.num_up) = self.base_nc * (2 ** self.num_down) self.head_block", "None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds,", "# print(x.shape, cond.shape) # Condition + FiLM(Feat_img) -> code_cond # _ denotes out_feats", "out : self attention value + input feature attention: B X N X", "# mu and logvar length # Similar to InGAN, increase kernel_size of entry", "+ x return out, attention class 
FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our", "= self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img = None # code_img + FiLM(Feat_cond)", "self.up_block[i](x) if self.in_shapes: # interpolate feature size after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) #", "X W X H) k -> q as Transformation returns : out :", "BX (N) X (N) every pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1,", "# alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG input:", "i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps", "= input_FiLM # whether input FiLMed factors self.out_feats = out_feats # whether output", "X N out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width,", "feats_cond_aug # Noise + Prev_img -> Feat_img, code_img code_i, feats_img, _, attn_maps =", "in FiLM module block_list += [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth,", "layer num self.num_up = opt.num_up # Decoder feat layer num # self.neck_depth =", "code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM,", "for inverse self.use_VAE = use_VAE # produce distribution for code self.use_attn = use_attn", "use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc,", "alpha, beta): x = X.clone() mean, std = self.calc_mean_std(x) mean = mean.expand_as(x) std", "** i 
down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2)) self.down_block = nn.ModuleList(down_block)", "code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or code_cond if", "(W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) # B X", "add 1 for relative feats ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return", "= [] for i in range(self.num_up): # double channels after reduce spatial size", "= neck_depth # FiLM layer bottleneck depth self.base_nc = opt.base_nc # base channel", "i in range(self.num_down): if self.out_shapes: # Output feature shape before DownSample shapes.append(x.shape[-2:]) x", "# def forward(self, x, k, q): \"\"\" inputs : x : input feature", "= [] # use for alpha(multiplier of FiLM) for f_c, f_c_a in zip(feats_cond,", "# use base_nc * nc_factor // 2 as bottleneck depth # while Guided-pix2pix", "len(k_feats) == len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\" feats, shapes, attn_maps =", "# using input_FiLM as affine transformation self.out_feats = out_feats # output feats self.out_shapes", "def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False): super().__init__() self.num_up", "distribution for code self.use_attn = use_attn # use attention mechanism if self.use_VAE: self.vae_tail_fc_in", "self.encoder_c(cond) _, feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative", "replace SFT module, but takes much more memory and brings a lot computational", "= self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat before UpSample/Concat", "reverse # assume feats from big to small(more ncs) self.double = double #", "= [] if noise is not None: input = torch.cat((input, noise), 1) x", "to 7 self.head_block = 
ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block = [] for i", "whether output decoder features self.in_shapes = in_shapes # whether interpolate feats according to", "double UpConv input channel, and half output channel # for concating skip feats", "24, 40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest')", "\"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder feat layer", "= self.tail_block(x) out = self.reparameterize(mu, logvar) out = out.view() else: out = self.tail_block(x)", "depth self.base_nc = opt.base_nc # base channel size for conv layers self.cond_nc =", "FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc *", "out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may replace SFT module,", "main = nn.Sequential(*block_list) return main def forward(self, feats): assert len(feats) == self.num_feats params", "out + x return out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer)", "self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn:", "# assume feats from big to small(more ncs) self.double = double # whether", "interpolate feature size after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10])", "2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block = nn.ModuleList(up_block)", "Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc * (2 **", "spatial size nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor", "if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\" feats,", "i - 1] if self.input_FiLM: # also apply FiLM params on 
skip_feats skip_feat", "= Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc,", "D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM", "1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def", "Noise + Prev_img -> Feat_img, code_img code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict)", "attach FiLM source features to main graph instead detach() # update: no need", "and Image(+Noise) params: num_down, base_nc, out_nc return: [features] + [Code] \"\"\" def __init__(self,", "X.clone() mean, std = self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x) return alpha", "20]) torch.Size([12, 20]) # torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x = F.interpolate(x,", "not None: input = torch.cat((input, noise), 1) x = self.head_block(input) for i in", "# nc_factor nc_factor = 2 ** (num_feats - i) if reverse: nc_factor =", "range(self.num_down): if self.out_shapes: # Output feature shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x)", "None, None if self.out_feats: feats = [] if self.out_shapes: shapes = [] #", "nn.Sequential(*block_list) return main def forward(self, feats): assert len(feats) == self.num_feats params = []", "feat_std def affine_transformation(self, X, alpha, beta): x = X.clone() mean, std = self.calc_mean_std(x)", "input = torch.cat((input, noise), 1) x = self.head_block(input) for i in range(self.num_down): if", "= out_nc self.input_FiLM = input_FiLM # whether input FiLMed factors self.out_feats = out_feats", "merge skip feats before UpSample skip_feat = skip_feats[self.num_up - i - 1] if", "bottleneck_depth, kernel_size=1)), nn.ReLU(True), 
nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return main def forward(self,", "feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff =", "x = self.up_block[i](x) if self.in_shapes: # interpolate feature size after UpSample # print(x.shape,", "__init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__()", "k -> q as Transformation returns : out : self attention value +", "for fixed-point loss and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down =", "B X C X N out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out =", "None: input = torch.cat((input, noise), 1) x = self.head_block(input) for i in range(self.num_down):", "def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder feat layer num", "= nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma =", "assert len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\" else: raise ValueError(\"in_shapes not in", "out_nc return: [features] + [Code] \"\"\" def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False,", "= self.softmax(energy) # BX (N) X (N) every pixel has W*H scores proj_value", "keep the original naming \"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc", "self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc,", "if self.use_attn: # attn_maps = [] if noise is not None: input =", "1) x = 
self.head_block(input) for i in range(self.num_down): if self.out_shapes: # Output feature", "N X N (N is Width*Height) \"\"\" m_batchsize, C, width, height = x.size()", "None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM:", "input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc =", "self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM #", "of decoder to 7 # Due to blurry edges, reduce the tail block", "E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise", "self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug", "height) out = self.gamma * out + x return out, attention class FiLM(BaseNetwork):", "calc_mean_std(self, feat, eps=1e-5): # eps is a small value added to the variance", "= 2 ** (self.num_up - i) if skip_feats: # double UpConv input channel,", "feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1)", "# torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up - i", "self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block = [] for i", "if self.double: block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else: block_list =", "kernel_size of entry block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block", "kernel=7, pad=3) down_block = [] for i in range(self.num_down): # double channels after", "relative feats ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params class 
SemIAGenerator(BaseNetwork):", "alpha * ((x - mean) / std) + beta def forward(self, code, skip_feats=None,", "aug cond feature maps( B X C X W X H) k ->", "out = self.reparameterize(mu, logvar) out = out.view() else: out = self.tail_block(x) return out,", "f_c + 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff = [] # use for", "i - 1], mode='nearest') out = self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\"", "\"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha,", "params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output:", "store attn_maps # if self.use_attn: # attn_maps = [] if noise is not", "B X C X W X H) k : cond feature maps( B", "return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond", "output feats self.out_shapes = out_shapes # output feats shape for inverse self.use_VAE =", "beta(bias of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug", ": aug cond feature maps( B X C X W X H) k", "C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean,", "use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return", "before UpSample skip_feat = skip_feats[self.num_up - i - 1] if self.input_FiLM: # also", "None: # merge skip feats before UpSample skip_feat = skip_feats[self.num_up - i -", "# interpolate feature size after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6,", "out.view(m_batchsize, C, width, height) out = self.gamma * out + x 
return out,", "FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14))", "# FiLM layer bottleneck depth self.base_nc = opt.base_nc # base channel size for", "instead of torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM,", "feats_cond, feats_cond_aug # Noise + Prev_img -> Feat_img, code_img code_i, feats_img, _, attn_maps", "instead detach() # update: no need for add 1 for relative feats ratio", "across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2)) self.bottlenecks", "self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu", "out = out.view() else: out = self.tail_block(x) return out, feats, shapes, attn_maps class", "X H) q : aug cond feature maps( B X C X W", "base_nc self.num_feats = num_feats self.reverse = reverse # assume feats from big to", "class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our version seperates the scaling and", "FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats and q_feats", "-> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] #", "params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also", "UpConv input channel, and half output channel # for concating skip feats from", "- 1] if self.input_FiLM: # also apply FiLM params on skip_feats skip_feat =", "shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn:", "as Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = 
in_dim self.query_conv", "# do not store attn_maps # if self.use_attn: # attn_maps = [] if", "X C X W X H) k : cond feature maps( B X", "Vb from semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation", "Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv =", "nn.Softmax(dim=-1) # def forward(self, x, k, q): \"\"\" inputs : x : input", "prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False)", "(self.num_up - i) if skip_feats: # double UpConv input channel, and half output", "is Width*Height) \"\"\" m_batchsize, C, width, height = x.size() proj_query = self.query_conv(q).view(m_batchsize, -1,", "* height) # B X C x (*W*H) energy = torch.bmm(proj_query, proj_key) #", "* 2, int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc *", "-1, width * height).permute(0, 2, 1) # B X (W*H) X C proj_key", "X W X H) k : cond feature maps( B X C X", "tail block in_nc # self.base_nc * (2 ** self.num_up) = self.base_nc * (2", "opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder feat layer num self.num_up =", "* nc_factor, int(self.base_nc * nc_factor // 2))) self.up_block = nn.ModuleList(up_block) # Similar to", "channels after reduce spatial size nc_factor = 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc", "C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N,", "self.use_VAE = use_VAE # produce distribution for code self.use_attn = use_attn # use", "if self.out_feats: feats = [] # if self.use_attn: # attn_maps = [] x", "self.num_down), self.out_nc) if self.use_attn: attn_layers = [] for i in range(self.num_down): # double", "Similar to InGAN, increase kernel_size of tail block of 
decoder to 7 #", "q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat before UpSample/Concat and after FiLM/Attention", "much more memory and brings a lot computational burden cond_feats as Key aug_cond_feats", "around 0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a,", "pad=0)] else: block_list = [] # add a resnet block in bottleneck layer", "in range(self.num_down): if self.out_shapes: # Output feature shape before DownSample shapes.append(x.shape[-2:]) x =", "in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down", "alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for", "X N X N (N is Width*Height) \"\"\" m_batchsize, C, width, height =", "Decoder: apply FiLM alpha and beta # Feat_cond -> alpha, beta alpha_conds =", "# torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) # torch.Size([1, 32, 12, 20]) torch.Size([12,", "out = torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out", "for i in range(num_feats): # nc_factor nc_factor = 2 ** (num_feats - i)", "FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None):", "super().__init__() self.num_down = num_down self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc", "beta_conds): # shift rel_feats_ratio, alpha to around 0 for visualization rel_feats_list.append([fr.clone() - 1,", "torch.cat((input, noise), 1) x = self.head_block(input) for i in range(self.num_down): if self.out_shapes: #", "W X H) q : aug cond feature maps( B X C X", "cond_aug=None): # print(x.shape, cond.shape) # Condition + FiLM(Feat_img) -> code_cond # _ denotes", "out_feats=False, in_shapes=False, 
skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc = base_nc self.in_nc =", "# BX (N) X (N) every pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize,", "= self.gamma * out + x return out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic", "the rel_feats are concated, instead of diff/ratio bottlenecks = [] for i in", "# _ denotes out_feats of ecnoder_c is None _, feats_cond, _, _ =", "feats from big to small(more ncs) self.double = double # whether the rel_feats", "kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return main def forward(self, feats):", "code_fc # mu and logvar length # Similar to InGAN, increase kernel_size of", "transpose check attention = self.softmax(energy) # BX (N) X (N) every pixel has", "self attention value + input feature attention: B X N X N (N", "== len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) ==", "+ 1) # use base_nc * nc_factor // 2 as bottleneck depth #", "alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise + Prev_img", "using input_FiLM as affine transformation self.out_feats = out_feats # output feats self.out_shapes =", "= code_fc # mu and logvar length # Similar to InGAN, increase kernel_size", "// 2))) self.up_block = nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size of tail", "(i + 1) # use base_nc * nc_factor // 2 as bottleneck depth", "takes much more memory and brings a lot computational burden cond_feats as Key", "attn_layers = [] for i in range(self.num_down): # double channels after reduce spatial", "kernel_size=1))] main = nn.Sequential(*block_list) return main def forward(self, feats): assert len(feats) == self.num_feats", "in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # whether input FiLMed 
factors self.out_feats", "add a resnet block in bottleneck layer for alpha and beta # update:", "out = self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module", "= [] for i in range(num_feats): # nc_factor nc_factor = 2 ** (num_feats", "# Out feat before UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats is not", "1)) out = out.view(m_batchsize, C, width, height) out = self.gamma * out +", "avoid divide-by-zero. size = feat.size() assert (len(size) == 4) N, C = size[:2]", "x, k, q): \"\"\" inputs : x : input feature maps( B X", "* (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block", "(2 ** self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc,", "half output channel # for concating skip feats from encoder # torch.cat(feat, skip_feat)", "channel size, commonly 3 self.opt = opt # use FiLM or Cond-Attn code_c_nc,", "to blurry edges, reduce the tail block kernel size back to 3 self.tail_block", "semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation map) and", "self.num_up, \"skip feats number mismatch\" if self.in_shapes: if in_shapes is not None: assert", "FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats and q_feats", "f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug /", "+ eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N,", "tail block kernel size back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1)", "if self.out_feats: feats = [] if self.out_shapes: shapes = [] # do not", "class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may replace SFT module, but takes", "* nc_factor)) self.attn_layers = 
nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps is a", "out.view() else: out = self.tail_block(x) return out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\"", "[ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list)", "aug_cond_feats as Query image_feats as Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__()", "affine_transformation(self, X, alpha, beta): x = X.clone() mean, std = self.calc_mean_std(x) mean =", "= torch.bmm(proj_query, proj_key) # transpose check attention = self.softmax(energy) # BX (N) X", "input FiLMed factors self.out_feats = out_feats # whether output decoder features self.in_shapes =", "in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug # cond2img in Decoder:", "= torch.bmm(proj_value, attention.permute(0, 2, 1)) out = out.view(m_batchsize, C, width, height) out =", "skip feats from encoder # torch.cat(feat, skip_feat) -> feat_next # 256 -> 64,", "FiLM(Feat_cond) -> Fake_img # _ denotes out_feats of decoder_i is None D_param_dict =", "Output feature shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x =", "num_up, base_nc, in_nc return: Image U-Net skip connections help little. 
\"\"\" def __init__(self,", "energy = torch.bmm(proj_query, proj_key) # transpose check attention = self.softmax(energy) # BX (N)", "W X H) k : cond feature maps( B X C X W", "\"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn:", "** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise +", "for Image input: feature from encoder parmas: num_up, base_nc, in_nc return: Image U-Net", "# whether concat skip feats from encoder self.use_attn = use_attn # use attention", "self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization alpha_beta_list = [] # for fixed-point", "\"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"]", "self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map)", "while Guided-pix2pix use fixed 100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc", "before UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats is not None: # merge", "channel size, 3 for seg self.im_nc = opt.im_nc # Image channel size, commonly", "(i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5):", "# whether input FiLMed factors self.out_feats = out_feats # whether output decoder features", "-> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor // 2))) else:", "(2 ** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise", 
"[ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else: block_list = [] # add a", "attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img = None # code_img +", "InGAN, increase kernel_size of entry block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7,", "in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc =", "torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest') out =", "according to in_shapes self.skip_feats = skip_feats # whether concat skip feats from encoder", "height = x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) #", "self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor,", "and q_feats mismatch\" feats, shapes, attn_maps = None, None, None if self.out_feats: feats", "Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc, out_nc return: [features] + [Code] \"\"\"", "code_img code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img =", "None # code_img + FiLM(Feat_cond) -> Fake_img # _ denotes out_feats of decoder_i", "== self.num_up, \"in_shapes number mismatch self.num_up\" else: raise ValueError(\"in_shapes not in Input\") if", "opt.num_down # Encoder feat layer num self.num_up = opt.num_up # Decoder feat layer", "self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up,", "input_FiLM # whether input FiLMed factors self.out_feats = out_feats # whether output decoder", "self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x,", "self.num_down = opt.num_down # 
Encoder feat layer num self.num_up = opt.num_up # Decoder", "if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up,", "[] # use for beta(bias of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug):", "return feat_mean, feat_std def affine_transformation(self, X, alpha, beta): x = X.clone() mean, std", "= out_feats # whether output decoder features self.in_shapes = in_shapes # whether interpolate", "Out feat before UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats is not None:", "size nc_factor = 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers =", "out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or", "k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if", "std.expand_as(x) return alpha * ((x - mean) / std) + beta def forward(self,", "to 7 # Due to blurry edges, reduce the tail block kernel size", "f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug", "opt.base_nc # base channel size for conv layers self.cond_nc = opt.cond_nc # Condition", "3 self.opt = opt # use FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc", "to small(more ncs) self.double = double # whether the rel_feats are concated, instead", "nc_factor nc_factor = 2 ** (num_feats - i) if reverse: nc_factor = 2", "bottleneck layer for alpha and beta # update: remove bn in FiLM module", "apply FiLM alpha and beta # Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio)", "[] if self.out_shapes: shapes = [] # do not store attn_maps # if", "out_nc, input_FiLM=False, out_feats=False, in_shapes=False, 
skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up self.base_nc = base_nc", "value added to the variance to avoid divide-by-zero. size = feat.size() assert (len(size)", "FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or prev_img if self.input_FiLM: assert len(FiLM_alphas)", "-1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def affine_transformation(self, X, alpha, beta): x", "/ std) + beta def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_()", "input: feature from encoder parmas: num_up, base_nc, in_nc return: Image U-Net skip connections", "X N (N is Width*Height) \"\"\" m_batchsize, C, width, height = x.size() proj_query", "head block out_nc = Encoder's tail block in_nc # self.base_nc * (2 **", "Guided-pix2pix use fixed 100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc *", "input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or prev_img if self.input_FiLM:", "beta # update: remove bn in FiLM module block_list += [ResnetBlock(nc, use_bn=False, use_bias=True),", "self.num_up = opt.num_up # Decoder feat layer num # self.neck_depth = neck_depth #", "not self.opt.D_use_skip: feats_img = None # code_img + FiLM(Feat_cond) -> Fake_img # _", "skip_feats[self.num_up - i - 1] if self.input_FiLM: # also apply FiLM params on", "# produce distribution for code self.use_attn = use_attn # use attention mechanism if", "down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE:", "2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block =", "Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, 
FiLM_alphas=None, FiLM_betas=None, k_feats=None,", "for visualization alpha_beta_list = [] # for fixed-point loss in zero-reconstruction for fr,", "self.num_up)) up_block = [] for i in range(self.num_up): nc_factor = 2 ** (self.num_up", "FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or code_cond if skip_feats is not", "Attetion module may replace SFT module, but takes much more memory and brings", "torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) # torch.Size([1, 32, 12, 20]) torch.Size([12, 20])", "proj_key) # transpose check attention = self.softmax(energy) # BX (N) X (N) every", "is not None: input = torch.cat((input, noise), 1) x = self.head_block(input) for i", "- 1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None,", "InGAN, increase kernel_size of tail block of decoder to 7 # Due to", "pad=1) if self.use_attn: attn_layers = [] for i in range(self.num_up): # double channels", "_ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img, rel_feats_list, alpha_beta_list", "class Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation map) and Image(+Noise) params: num_down,", "for i in range(self.num_up): nc_factor = 2 ** (self.num_up - i) if skip_feats:", "Similar to InGAN, increase kernel_size of entry block to 7 self.head_block = ConvBaseBlock(self.in_nc,", "W X H) k -> q as Transformation returns : out : self", "is None _, feats_cond, _, _ = self.encoder_c(cond) _, feats_cond_aug, _, _ =", "ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block = [] for i in range(self.num_down): # double", "if noise is not None: input = torch.cat((input, noise), 1) x = self.head_block(input)", "help little. 
\"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False,", "from encoder parmas: num_up, base_nc, in_nc return: Image U-Net skip connections help little.", "self.num_feats params = [] for i in range(self.num_feats): # attach FiLM source features", "ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG", "out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax", "self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise + z_prev instead of torch.cat(noise+prev, prev)", "Condition channel size, 3 for seg self.im_nc = opt.im_nc # Image channel size,", "bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self,", "nc_factor, self.base_nc * nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block =", "* (2 ** self.num_up) = self.base_nc * (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc,", "# while Guided-pix2pix use fixed 100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor,", "1, 1) return feat_mean, feat_std def affine_transformation(self, X, alpha, beta): x = X.clone()", "/ std) + beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None):", "feature attention: B X N X N (N is Width*Height) \"\"\" m_batchsize, C,", "if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between cond and cond_aug rel_feats_ratio =", "for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict", "Encoder's tail block in_nc # self.base_nc * (2 
** self.num_up) = self.base_nc *", "1) # B X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1, width *", "[] # do not store attn_maps # if self.use_attn: # attn_maps = []", "memory and brings a lot computational burden cond_feats as Key aug_cond_feats as Query", "Key aug_cond_feats as Query image_feats as Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn,", "update: no need for add 1 for relative feats ratio # alpha &", "double channels after reduce spatial size nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc *", "out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM", "code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc * (2 ** self.num_down)", "size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1,", "nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc * (2", "B X C X W X H) q : aug cond feature maps(", "* ((x - mean) / std) + beta def reparameterize(self, mu, logvar): if", "use attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length", "feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return", "nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self, x, k, q): \"\"\" inputs :", "feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img = None #", "a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to around", "# update: no need for add 1 for relative feats ratio # alpha", "self.reparameterize(mu, logvar) out = out.view() else: out = self.tail_block(x) 
return out, feats, shapes,", "mean, std = self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x) return alpha *", "main graph instead detach() # update: no need for add 1 for relative", "num self.num_up = opt.num_up # Decoder feat layer num # self.neck_depth = neck_depth", "reduce spatial size nc_factor = 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor))", "-1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C,", "input feature maps( B X C X W X H) k : cond", "a small value added to the variance to avoid divide-by-zero. size = feat.size()", "= [] # for fixed-point loss in zero-reconstruction for fr, fd, a, b", "# Decoder feat layer num # self.neck_depth = neck_depth # FiLM layer bottleneck", "channels after reduce spatial size nc_factor = 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc", "self.use_attn = use_attn # use attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in #", "skip_feats is not None: # merge skip feats before UpSample skip_feat = skip_feats[self.num_up", "self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block = [] for", "feats, shapes, attn_maps = None, None, None if self.out_feats: feats = [] if", "reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None): # print(x.shape,", "skip_feat = skip_feats[self.num_up - i - 1] if self.input_FiLM: # also apply FiLM", "= ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block = [] for i in", "(N) every pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)", "the tail block kernel size back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3,", "is not None: assert len(skip_feats) == self.num_up, \"skip feats number mismatch\" if self.in_shapes:", "update: remove bn in FiLM module block_list += [ResnetBlock(nc, 
use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth,", "feats_img = None # code_img + FiLM(Feat_cond) -> Fake_img # _ denotes out_feats", "x return out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our version", "ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas", "the scaling and shiftting, just keep the original naming \"\"\" def __init__(self, base_nc,", "fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha", "def forward(self, x, k, q): \"\"\" inputs : x : input feature maps(", "whether the rel_feats are concated, instead of diff/ratio bottlenecks = [] for i", "* out + x return out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation", "= nn.Softmax(dim=-1) # def forward(self, x, k, q): \"\"\" inputs : x :", "skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i])", "# use for alpha(multiplier of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a", "import * class Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation map) and Image(+Noise)", "# code: code_img or code_cond if skip_feats is not None: assert len(skip_feats) ==", "is not None: # merge skip feats before UpSample skip_feat = skip_feats[self.num_up -", "num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False): super().__init__() self.num_up = num_up", "Out feat after DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x)", "# torch.cat(feat, skip_feat) -> feat_next # 256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc", "B X C X W X H) k -> q as Transformation 
returns", "D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or", "out_nc self.input_FiLM = input_FiLM # using input_FiLM as affine transformation self.out_feats = out_feats", "FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x =", "skip feats from encoder self.use_attn = use_attn # use attention mechanism # Decoder's", "zero-reconstruction for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift", "torch.autograd import Variable as Vb from semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder", "64, 6, 10]) torch.Size([6, 10]) # torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) #", "variance to avoid divide-by-zero. size = feat.size() assert (len(size) == 4) N, C", "block of decoder to 7 # Due to blurry edges, reduce the tail", "or Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc * (2", "= feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean", "= [] # for visualization alpha_beta_list = [] # for fixed-point loss in", "alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"]", "= num_up self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM =", "base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats = num_feats self.reverse =", "is a small value added to the variance to avoid divide-by-zero. 
size =", "self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up,", "nc_factor = 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers)", "size nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor *", "fixed-point loss in zero-reconstruction for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds,", "number mismatch self.num_up\" else: raise ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert len(FiLM_alphas)", "self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers = []", "beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise + Prev_img ->", "1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std", "- i) if skip_feats: # double UpConv input channel, and half output channel", "self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps = None, None if self.out_feats: feats", "if self.use_attn: # attn_maps = [] x = self.head_block(code) for i in range(self.num_up):", "== len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\" feats, shapes, attn_maps = None,", "- i) if reverse: nc_factor = 2 ** (i + 1) # use", "== len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps = None, None", "resnet block in bottleneck layer for alpha and beta # update: remove bn", "\"\"\" def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats =", "return: [features] + [Code] \"\"\" def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False,", "# double channels after reduce spatial size nc_factor = 2 ** i 
down_block.append(DownConvBlock(self.base_nc", "edges, reduce the tail block kernel size back to 3 self.tail_block = ConvBaseBlock(self.base_nc,", "if self.input_FiLM: # also apply FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i],", "mean) / std) + beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None,", "self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True)", "None _, feats_cond, _, _ = self.encoder_c(cond) _, feats_cond_aug, _, _ = self.encoder_c(cond_aug)", "code_img + FiLM(Feat_cond) -> Fake_img # _ denotes out_feats of decoder_i is None", "is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if", "= feats_cond, feats_cond_aug # Noise + Prev_img -> Feat_img, code_img code_i, feats_img, _,", "self.gamma * out + x return out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature", "C X W X H) k : cond feature maps( B X C", "attn_maps # if self.use_attn: # attn_maps = [] if noise is not None:", "parmas: num_up, base_nc, in_nc return: Image U-Net skip connections help little. 
\"\"\" def", "commonly 3 self.opt = opt # use FiLM or Cond-Attn code_c_nc, code_i_nc =", "= self.head_block(code) for i in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i])", "size, 3 for seg self.im_nc = opt.im_nc # Image channel size, commonly 3", "def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc * 2, nc, kernel=1,", "len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats)", "Image channel size, commonly 3 self.opt = opt # use FiLM or Cond-Attn", "use noise + z_prev instead of torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down,", "**E_param_dict) if not self.opt.D_use_skip: feats_img = None # code_img + FiLM(Feat_cond) -> Fake_img", "eps is a small value added to the variance to avoid divide-by-zero. size", "to InGAN, increase kernel_size of tail block of decoder to 7 # Due", "fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to", "nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return main def", "* (2 ** self.num_up)) up_block = [] for i in range(self.num_up): nc_factor =", "F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest') out = self.tail_block(x) return out, feats", "in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //", "logvar = self.tail_block(x) out = self.reparameterize(mu, logvar) out = out.view() else: out =", "remove bn in FiLM module block_list += [ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)),", "UpSample skip_feat = skip_feats[self.num_up 
- i - 1] if self.input_FiLM: # also apply", "rel_feats_ratio = [] # use for alpha(multiplier of FiLM) for f_c, f_c_a in", "assert len(feats) == self.num_feats params = [] for i in range(self.num_feats): # attach", "attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if self.in_shapes: # interpolate", "logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return", "for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio,", "denotes out_feats of ecnoder_c is None _, feats_cond, _, _ = self.encoder_c(cond) _,", "channels after reduce spatial size nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor,", "self.out_shapes = out_shapes # output feats shape for inverse self.use_VAE = use_VAE #", "__init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats = num_feats self.reverse", "noise is not None: input = torch.cat((input, noise), 1) x = self.head_block(input) for", "input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha, beta) for fixed-point loss and", "self.num_down, \"k_feats and q_feats mismatch\" feats, shapes, attn_maps = None, None, None if", "value + input feature attention: B X N X N (N is Width*Height)", "None, \"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if", "loss in zero-reconstruction for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds):", "12, 20]) torch.Size([12, 20]) # torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x =", "feats, attn_maps = None, None if self.out_feats: feats = [] # if self.use_attn:", "nc_factor = 2 ** (num_feats - i) if reverse: nc_factor = 2 **", "m_batchsize, C, width, height = x.size() proj_query = 
self.query_conv(q).view(m_batchsize, -1, width * height).permute(0,", "input_FiLM as affine transformation self.out_feats = out_feats # output feats self.out_shapes = out_shapes", "main def forward(self, feats): assert len(feats) == self.num_feats params = [] for i", "kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self, x, k, q):", "if self.use_VAE: mu, logvar = self.tail_block(x) out = self.reparameterize(mu, logvar) out = out.view()", "cond feature maps( B X C X W X H) q : aug", "skip_feat), 1) x = self.up_block[i](x) if self.in_shapes: # interpolate feature size after UpSample", "- 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None,", "1) x = self.up_block[i](x) if self.in_shapes: # interpolate feature size after UpSample #", "C, width, height = x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2,", "def calc_mean_std(self, feat, eps=1e-5): # eps is a small value added to the", "feat layer num self.num_up = opt.num_up # Decoder feat layer num # self.neck_depth", "num_up self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM", "skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta", "self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps is a small value", "out_nc = Encoder's tail block in_nc # self.base_nc * (2 ** self.num_up) =", "i) if reverse: nc_factor = 2 ** (i + 1) # use base_nc", "base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down =", "\"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() 
D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn:", "2))) self.up_block = nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size of tail block", "range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map =", "= torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if self.in_shapes: # interpolate feature size", "to around 0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()])", "proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X (W*H)", "None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse()", "// bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax =", "instead of diff/ratio bottlenecks = [] for i in range(num_feats): # nc_factor nc_factor", "i) if skip_feats: # double UpConv input channel, and half output channel #", "def __init__(self, base_nc, num_feats, double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats = num_feats", "maps( B X C X W X H) q : aug cond feature", "= size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C,", "kernel size back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn:", "# Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list =", "// 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list =", "nc, kernel=1, pad=0)] else: block_list = [] # add a resnet block in", "= 
in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim", "self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps", "_, feats_cond_aug, _, _ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats", "else: block_list = [] # add a resnet block in bottleneck layer for", "i in range(num_feats): # nc_factor nc_factor = 2 ** (num_feats - i) if", "self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block = [] for i in range(self.num_down):", "1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps", "not None: assert len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\" else: raise ValueError(\"in_shapes", "mismatch\" feats, attn_maps = None, None if self.out_feats: feats = [] # if", "__init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //", "= feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM:", "need for add 1 for relative feats ratio # alpha & beta separate", "mismatch self.num_up\" else: raise ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert len(FiLM_alphas) ==", "as Transformation returns : out : self attention value + input feature attention:", "self.opt = opt # use FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc *", "= num_feats self.reverse = reverse # assume feats from big to small(more ncs)", "nc_factor * 2, int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc", "nc, bottleneck_depth): if self.double: 
block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else:", "2, 1) # B X (W*H) X C proj_key = self.key_conv(k).view(m_batchsize, -1, width", "q_feats=None): # input: cond or prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) ==", "reduce spatial size nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc *", "cond or prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, \"FiLM_alphas and", "= num_down self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM =", "use base_nc * nc_factor // 2 as bottleneck depth # while Guided-pix2pix use", ": out : self attention value + input feature attention: B X N", "rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to around 0 for visualization rel_feats_list.append([fr.clone()", "as Query image_feats as Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in", "if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond,", "width, height = x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1)", "Width*Height) \"\"\" m_batchsize, C, width, height = x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width", "self.head_block(input) for i in range(self.num_down): if self.out_shapes: # Output feature shape before DownSample", "self.use_attn: attn_layers = [] for i in range(self.num_up): # double channels after reduce", "torch.Size([12, 20]) # torch.Size([1, 16, 24, 40]) torch.Size([25, 40]) x = F.interpolate(x, size=in_shapes[self.num_up", "has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X", "[] # use for alpha(multiplier of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug):", "2 ** (self.num_up - i) 
if skip_feats: # double UpConv input channel, and", "Query image_feats as Value \"\"\" def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in =", "# code_img + FiLM(Feat_cond) -> Fake_img # _ denotes out_feats of decoder_i is", "1) # use base_nc * nc_factor // 2 as bottleneck depth # while", "self.head_block(code) for i in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if", "nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv", "feats_cond_aug # cond2img in Decoder: apply FiLM alpha and beta # Feat_cond ->", "num_down, base_nc, out_nc return: [features] + [Code] \"\"\" def __init__(self, num_down, base_nc, in_nc,", "out_shapes # output feats shape for inverse self.use_VAE = use_VAE # produce distribution", "return out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our version seperates", "= opt.im_nc # Image channel size, commonly 3 self.opt = opt # use", "self.use_VAE: mu, logvar = self.tail_block(x) out = self.reparameterize(mu, logvar) out = out.view() else:", "= Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise + z_prev instead of", "feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict)", "if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug # Noise + Prev_img -> Feat_img,", "all feature maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2)) self.bottlenecks =", "attn_maps.append(attn_map) if self.out_feats: # Out feat before UpSample/Concat and after FiLM/Attention feats.append(x) if", "= 
std.expand_as(x) return alpha * ((x - mean) / std) + beta def", "= x.size() proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B", "out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc = base_nc self.in_nc", "attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): # eps is", "whether concat skip feats from encoder self.use_attn = use_attn # use attention mechanism", "= self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between cond and cond_aug", "layer) Our version seperates the scaling and shiftting, just keep the original naming", "= in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # whether input FiLMed factors", ": self attention value + input feature attention: B X N X N", "proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N", "super().__init__() self.base_nc = base_nc self.num_feats = num_feats self.reverse = reverse # assume feats", "C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N,", "[] # add a resnet block in bottleneck layer for alpha and beta", "entry block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block = []", "fake_image(tgt_img) also output FiLM parameters(alpha, beta) for fixed-point loss and visualization \"\"\" def", "attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat after", "encoder # torch.cat(feat, skip_feat) -> feat_next # 256 -> 64, 128 -> 32", "self.key_conv(k).view(m_batchsize, -1, width * height) # B X C x (*W*H) energy =", "nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list", "* (2 ** self.num_down) self.encoder_c 
= Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use", "size nc_factor = 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers =", "if skip_feats is not None: # merge skip feats before UpSample skip_feat =", "i in range(self.num_down): # double channels after reduce spatial size nc_factor = 2", "scaling and shiftting, just keep the original naming \"\"\" def __init__(self, base_nc, num_feats,", "attn_maps = [] x = self.head_block(code) for i in range(self.num_up): if self.input_FiLM: x", "* height) # B X C X N out = torch.bmm(proj_value, attention.permute(0, 2,", "[] for i in range(self.num_down): # double channels after reduce spatial size nc_factor", "in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor,", "** (self.num_up - i) if skip_feats: # double UpConv input channel, and half", "X H) k : cond feature maps( B X C X W X", "self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat),", "if reverse: nc_factor = 2 ** (i + 1) # use base_nc *", "= self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x) return alpha * ((x -", ": cond feature maps( B X C X W X H) q :", "-> Feat_img, code_img code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip:", "-1, width * height) # B X C x (*W*H) energy = torch.bmm(proj_query,", "assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\" feats, shapes, attn_maps", "input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta =", "logvar length # Similar to InGAN, increase kernel_size of entry block to 7", "if self.use_attn: attn_layers = 
[] for i in range(self.num_up): # double channels after", "-> Fake_img # _ denotes out_feats of decoder_i is None D_param_dict = {\"FiLM_alphas\":", "alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if", "(2 ** self.num_up) = self.base_nc * (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc", "Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\"", "code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up,", "use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return main", "self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img,", "feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img,", "mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\"", "for alpha and beta # update: remove bn in FiLM module block_list +=", "in range(self.num_up): # double channels after reduce spatial size nc_factor = 2 **", "D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug", "2 as bottleneck depth # while Guided-pix2pix use fixed 100 across all feature", "depth # while Guided-pix2pix use fixed 100 across all feature maps 
bottlenecks.append(self.bottleneck_layer(base_nc *", "use_VAE=False, use_attn=False, code_in=None, code_fc=None): super().__init__() self.num_down = num_down self.base_nc = base_nc self.in_nc =", "in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to around 0 for", "torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc * 2, nc,", "self.vae_tail_fc_nc = code_fc # mu and logvar length # Similar to InGAN, increase", "not None: # merge skip feats before UpSample skip_feat = skip_feats[self.num_up - i", "C proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) # B X C x", "feature maps( B X C X W X H) k -> q as", "print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) # torch.Size([1, 32, 12,", "netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha, beta)", "to the variance to avoid divide-by-zero. 
size = feat.size() assert (len(size) == 4)", "mean) / std) + beta def reparameterize(self, mu, logvar): if self.training: std =", "in range(self.num_up): nc_factor = 2 ** (self.num_up - i) if skip_feats: # double", "feats from encoder self.use_attn = use_attn # use attention mechanism # Decoder's head", "from semia.network import * class Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation map)", "feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def affine_transformation(self, X, alpha,", "std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self,", "mismatch\" if self.in_shapes: if in_shapes is not None: assert len(in_shapes) == self.num_up, \"in_shapes", "after DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar = self.tail_block(x) out =", "* nc_factor, base_nc * nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc,", "return params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img)", "attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature from encoder parmas: num_up,", "self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # using input_FiLM as", "\"\"\" Encoder for both Condition Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc, out_nc", "if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\" feats,", "cond_feats as Key aug_cond_feats as Query image_feats as Value \"\"\" def __init__(self, in_dim,", "code_img or code_cond if skip_feats is not None: assert len(skip_feats) == self.num_up, \"skip", "= [] # use for beta(bias of FiLM) for f_c, f_c_a in zip(feats_cond,", "cond feature maps( B X C X W X H) k -> q", "if self.out_feats: # Out feat 
after DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu,", "None: assert len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\" else: raise ValueError(\"in_shapes not", "torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True,", "up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc *", "= feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def affine_transformation(self, X,", "graph instead detach() # update: no need for add 1 for relative feats", "reverse: nc_factor = 2 ** (i + 1) # use base_nc * nc_factor", "q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat after DownSample and FiLM/Attention feats.append(x)", "_ = self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between cond and", "self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img = None # code_img + FiLM(Feat_cond) ->", "on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: skip_feat, attn_map = self.attn_layers[i](skip_feat,", "the variance to avoid divide-by-zero. size = feat.size() assert (len(size) == 4) N,", "after reduce spatial size nc_factor = 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc *", "\"\"\" Cond-Attention Module Attetion module may replace SFT module, but takes much more", "alpha and beta # update: remove bn in FiLM module block_list += [ResnetBlock(nc,", "\"\"\" def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None,", "little. 
\"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False):", "FiLMModule(Semantic Feature Translation layer) Our version seperates the scaling and shiftting, just keep", "SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM", "Cond-Attention Module Attetion module may replace SFT module, but takes much more memory", "len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats)", "= FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None,", "Encoder(BaseNetwork): \"\"\" Encoder for both Condition Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc,", "skip_feat) -> feat_next # 256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc * nc_factor", "if self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_())", "self.encoder_c(cond_aug) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between cond and cond_aug rel_feats_ratio", "range(self.num_up): nc_factor = 2 ** (self.num_up - i) if skip_feats: # double UpConv", "feature maps( B X C X W X H) k : cond feature", "+ beta def reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps =", "X, alpha, beta): x = X.clone() mean, std = self.calc_mean_std(x) mean = mean.expand_as(x)", "Decoder for Image input: feature from encoder parmas: num_up, base_nc, in_nc return: Image", "q_feats mismatch\" feats, shapes, attn_maps = None, None, None if self.out_feats: feats =", "print(x.shape, cond.shape) # Condition + FiLM(Feat_img) -> code_cond # _ denotes out_feats of", "but takes much more memory and brings a lot computational 
burden cond_feats as", "_ denotes out_feats of ecnoder_c is None _, feats_cond, _, _ = self.encoder_c(cond)", "k_feats=None, q_feats=None): # code: code_img or code_cond if skip_feats is not None: assert", "# torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) # torch.Size([1, 16, 24, 40]) torch.Size([25,", "= FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape) #", "i in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x,", "bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1)", "whether input FiLMed factors self.out_feats = out_feats # whether output decoder features self.in_shapes", "[] for i in range(self.num_feats): # attach FiLM source features to main graph", "decoder features self.in_shapes = in_shapes # whether interpolate feats according to in_shapes self.skip_feats", "* nc_factor, self.base_nc * nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block", "self.cond_nc, code_c_nc, out_feats=True) # use noise + z_prev instead of torch.cat(noise+prev, prev) as", "i in range(self.num_up): # double channels after reduce spatial size nc_factor = 2", "self.opt.E_use_FiLM or self.opt.D_use_FiLM: # Relative feats between cond and cond_aug rel_feats_ratio = []", "+ 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff = [] # use for beta(bias", "features to main graph instead detach() # update: no need for add 1", "self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out", "features self.in_shapes = in_shapes # whether interpolate feats according to in_shapes self.skip_feats =", "kernel_size of tail block of decoder to 7 # Due to blurry edges,", "module block_list += 
[ResnetBlock(nc, use_bn=False, use_bias=True), nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)), nn.ReLU(True), nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))]", "neck_depth # FiLM layer bottleneck depth self.base_nc = opt.base_nc # base channel size", "nc_factor = 2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2))", "Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = []", "q : aug cond feature maps( B X C X W X H)", "nc_factor // 2))) self.up_block = nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size of", "block_list = [] # add a resnet block in bottleneck layer for alpha", "block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else: block_list = [] #", "and after FiLM/Attention feats.append(x) if skip_feats is not None: # merge skip feats", "std) + beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): #", "def forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition + FiLM(Feat_img) ->", "FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug", "for fixed-point loss in zero-reconstruction for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff,", "# Due to blurry edges, reduce the tail block kernel size back to", "base_nc * nc_factor // 2 as bottleneck depth # while Guided-pix2pix use fixed", "output decoder features self.in_shapes = in_shapes # whether interpolate feats according to in_shapes", "= self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization alpha_beta_list = [] # for", "self.out_shapes: shapes = [] # do not store attn_maps # if self.use_attn: #", "= feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)", "ncs) 
self.double = double # whether the rel_feats are concated, instead of diff/ratio", "double=False, reverse=False): super().__init__() self.base_nc = base_nc self.num_feats = num_feats self.reverse = reverse #", "self.im_nc = opt.im_nc # Image channel size, commonly 3 self.opt = opt #", "q): \"\"\" inputs : x : input feature maps( B X C X", "size for conv layers self.cond_nc = opt.cond_nc # Condition channel size, 3 for", "FiLM source features to main graph instead detach() # update: no need for", "feats = [] if self.out_shapes: shapes = [] # do not store attn_maps", "self.use_attn: # attn_maps = [] x = self.head_block(code) for i in range(self.num_up): if", "FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition", "ecnoder_c is None _, feats_cond, _, _ = self.encoder_c(cond) _, feats_cond_aug, _, _", "q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if self.in_shapes:", "forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or code_cond", "0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b])", "# feats_cond_aug - feats_cond_aug # cond2img in Decoder: apply FiLM alpha and beta", "# double UpConv input channel, and half output channel # for concating skip", "x (*W*H) energy = torch.bmm(proj_query, proj_key) # transpose check attention = self.softmax(energy) #", "self.out_feats: feats = [] # if self.use_attn: # attn_maps = [] x =", "range(self.num_down): # double channels after reduce spatial size nc_factor = 2 ** i", "fixed-point loss and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down", "base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # using input_FiLM", "alpha(multiplier of FiLM) for f_c, 
f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c", "Our version seperates the scaling and shiftting, just keep the original naming \"\"\"", "# self.neck_depth = neck_depth # FiLM layer bottleneck depth self.base_nc = opt.base_nc #", "and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder", "code self.use_attn = use_attn # use attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in", "int(self.base_nc * nc_factor // 2))) self.up_block = nn.ModuleList(up_block) # Similar to InGAN, increase", "increase kernel_size of entry block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3)", "in_nc # self.base_nc * (2 ** self.num_up) = self.base_nc * (2 ** self.num_down)", "torch.cat(feat, skip_feat) -> feat_next # 256 -> 64, 128 -> 32 up_block.append(UpConvBlock(self.base_nc *", "= {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"]", "# Image channel size, commonly 3 self.opt = opt # use FiLM or", "use for beta(bias of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c))", "2 ** i down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2)) self.down_block =", "[] if noise is not None: input = torch.cat((input, noise), 1) x =", "k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat before UpSample/Concat and after", "X (N) every pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width *", "i in range(self.num_feats): # attach FiLM source features to main graph instead detach()", "forward(self, x, k, q): \"\"\" inputs : x : input feature maps( B", "block in_nc # self.base_nc * (2 ** self.num_up) = self.base_nc * (2 **", "_, feats_cond, _, _ = self.encoder_c(cond) _, 
feats_cond_aug, _, _ = self.encoder_c(cond_aug) if", "decoder_i is None D_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None}", "attention mechanism # Decoder's head block out_nc = Encoder's tail block in_nc #", "for beta(bias of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) #", "** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block = []", "+ 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): #", "1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond rel_feats_diff = [] # use", "eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None,", "(2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers = [] for i in range(self.num_down):", "+ Prev_img -> Feat_img, code_img code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if", "alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization alpha_beta_list", "scores proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X", "Prev_img -> Feat_img, code_img code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if not", "= nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size of tail block of decoder", "diff/ratio bottlenecks = [] for i in range(num_feats): # nc_factor nc_factor = 2", "feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return", "**D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img, rel_feats_list, alpha_beta_list else: return fake_img, attn_maps", "# if self.use_attn: # attn_maps = 
[] if noise is not None: input", "shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i],", "ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers = [] for i", "in_shapes is not None: assert len(in_shapes) == self.num_up, \"in_shapes number mismatch self.num_up\" else:", "= use_VAE # produce distribution for code self.use_attn = use_attn # use attention", "** (num_feats - i) if reverse: nc_factor = 2 ** (i + 1)", "x = X.clone() mean, std = self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x)", "code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img = None", "= nn.Parameter(torch.zeros(1)) self.softmax = nn.Softmax(dim=-1) # def forward(self, x, k, q): \"\"\" inputs", "Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise + z_prev instead of torch.cat(noise+prev,", "self.out_feats = out_feats # output feats self.out_shapes = out_shapes # output feats shape", "= skip_feats[self.num_up - i - 1] if self.input_FiLM: # also apply FiLM params", "# for fixed-point loss in zero-reconstruction for fr, fd, a, b in zip(rel_feats_ratio,", "feat.size() assert (len(size) == 4) N, C = size[:2] feat_var = feat.view(N, C,", "if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse()", "x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i])", "returns : out : self attention value + input feature attention: B X", "self.num_down) self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True) # use noise + z_prev", "b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\": 
None, \"k_feats\": None, \"q_feats\": None} if self.opt.E_use_FiLM:", "beta separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg),", "X C X W X H) k -> q as Transformation returns :", "size after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6, 10])", "Feat_img, code_img code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict) if not self.opt.D_use_skip: feats_img", "* nc_factor * 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc)", "size=in_shapes[self.num_up - i - 1], mode='nearest') out = self.tail_block(x) return out, feats class", "if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if", "x = torch.cat((x, skip_feat), 1) x = self.up_block[i](x) if self.in_shapes: # interpolate feature", "maps( B X C X W X H) k -> q as Transformation", "* nc_factor // 2))) self.up_block = nn.ModuleList(up_block) # Similar to InGAN, increase kernel_size", "// 2 as bottleneck depth # while Guided-pix2pix use fixed 100 across all", "also apply FiLM params on skip_feats skip_feat = self.affine_transformation(skip_feat, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn:", "self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may replace", "return eps.mul(std).add_(mu) else: return Vb(mu.data.new(mu.size()).normal_()) def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None):", "parameters(alpha, beta) for fixed-point loss and visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__()", "self.base_nc * (2 ** self.num_up)) up_block = [] for i in range(self.num_up): nc_factor", "f_c, f_c_a in zip(feats_cond, feats_cond_aug): 
rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug # cond2img", "assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats and q_feats mismatch\" feats, attn_maps =", "# print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) # torch.Size([1, 32,", "# base channel size for conv layers self.cond_nc = opt.cond_nc # Condition channel", "nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))] main = nn.Sequential(*block_list) return main def forward(self, feats): assert len(feats)", "and beta # Feat_cond -> alpha, beta alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff)", "for relative feats ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params class", "VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if self.use_attn:", "return out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature", "of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c +", "self.base_nc, kernel=7, pad=3) down_block = [] for i in range(self.num_down): # double channels", "= mean.expand_as(x) std = std.expand_as(x) return alpha * ((x - mean) / std)", "to in_shapes self.skip_feats = skip_feats # whether concat skip feats from encoder self.use_attn", "feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def affine_transformation(self,", "= out_shapes # output feats shape for inverse self.use_VAE = use_VAE # produce", "alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to around 0 for visualization rel_feats_list.append([fr.clone() -", "if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _ = 
self.decoder_i(code_i,", "self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn:", "None, \"q_feats\": None} if self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"],", "(N) X (N) every pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width", "length self.vae_tail_fc_nc = code_fc # mu and logvar length # Similar to InGAN,", "self.use_attn = use_attn # use attention mechanism # Decoder's head block out_nc =", "alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _", "FiLM_betas=None, k_feats=None, q_feats=None): # input: cond or prev_img if self.input_FiLM: assert len(FiLM_alphas) ==", "as affine transformation self.out_feats = out_feats # output feats self.out_shapes = out_shapes #", "tail block of decoder to 7 # Due to blurry edges, reduce the", "= nn.Sequential(*block_list) return main def forward(self, feats): assert len(feats) == self.num_feats params =", "B X N X N (N is Width*Height) \"\"\" m_batchsize, C, width, height", "C X W X H) k -> q as Transformation returns : out", "X C x (*W*H) energy = torch.bmm(proj_query, proj_key) # transpose check attention =", "skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img, rel_feats_list, alpha_beta_list else: return fake_img,", "to avoid divide-by-zero. 
size = feat.size() assert (len(size) == 4) N, C =", "1], mode='nearest') out = self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module", "reverse=True) def forward(self, x, cond=None, cond_aug=None): # print(x.shape, cond.shape) # Condition + FiLM(Feat_img)", "of torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn,", "feature size after UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6,", "X W X H) q : aug cond feature maps( B X C", "self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip,", "- 1], mode='nearest') out = self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention", "conv layers self.cond_nc = opt.cond_nc # Condition channel size, 3 for seg self.im_nc", "if not self.opt.D_use_skip: feats_img = None # code_img + FiLM(Feat_cond) -> Fake_img #", "self.num_up, \"in_shapes number mismatch self.num_up\" else: raise ValueError(\"in_shapes not in Input\") if self.input_FiLM:", "(2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up)) up_block =", "+ z_prev instead of torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc,", "Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may replace SFT module, but takes much", "1, fd.clone(), a.clone() - 1, b.clone()]) alpha_beta_list.append([a, b]) E_param_dict = {\"FiLM_alphas\": None, \"FiLM_betas\":", "self.base_nc = opt.base_nc # base channel size for conv layers self.cond_nc = opt.cond_nc", "H) k : cond feature maps( B X C X W X H)", "\"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if 
self.opt.E_use_FiLM: E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds", "block to 7 self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3) down_block = [] for", "bottlenecks = [] for i in range(num_feats): # nc_factor nc_factor = 2 **", "- i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self, feat, eps=1e-5): #", "self.out_feats: # Out feat before UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats is", "# attn_maps.append(attn_map) if self.out_feats: # Out feat after DownSample and FiLM/Attention feats.append(x) if", "after FiLM/Attention feats.append(x) if skip_feats is not None: # merge skip feats before", "beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img, _ =", "self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self, x, cond=None, cond_aug=None): #", "Image(+Noise) params: num_down, base_nc, out_nc return: [features] + [Code] \"\"\" def __init__(self, num_down,", "for i in range(self.num_down): if self.out_shapes: # Output feature shape before DownSample shapes.append(x.shape[-2:])", "2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def calc_mean_std(self,", "opt.cond_nc # Condition channel size, 3 for seg self.im_nc = opt.im_nc # Image", "self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True) self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True) def forward(self,", "self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X (W*H) X C", "num_feats self.reverse = reverse # assume feats from big to small(more ncs) self.double", "= in_shapes # whether interpolate feats according to in_shapes 
self.skip_feats = skip_feats #", "spatial size nc_factor = 2 ** (i + 1) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers", "small(more ncs) self.double = double # whether the rel_feats are concated, instead of", "logvar) out = out.view() else: out = self.tail_block(x) return out, feats, shapes, attn_maps", "# attn_maps = [] x = self.head_block(code) for i in range(self.num_up): if self.input_FiLM:", "len(skip_feats) == self.num_up, \"skip feats number mismatch\" if self.in_shapes: if in_shapes is not", "self.bottlenecks = torch.nn.ModuleList(bottlenecks) def bottleneck_layer(self, nc, bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc *", "self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc", "# Out feat after DownSample and FiLM/Attention feats.append(x) if self.use_VAE: mu, logvar =", "len(feats) == self.num_feats params = [] for i in range(self.num_feats): # attach FiLM", "bottleneck_depth): if self.double: block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else: block_list", "encoder parmas: num_up, base_nc, in_nc return: Image U-Net skip connections help little. 
\"\"\"", "+ FiLM(Feat_img) -> code_cond # _ denotes out_feats of ecnoder_c is None _,", "feats according to in_shapes self.skip_feats = skip_feats # whether concat skip feats from", "self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down), self.out_nc) if self.use_attn: attn_layers", "== self.num_down, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) ==", "skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) x = torch.cat((x, skip_feat), 1)", "def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False, use_attn=False, code_in=None, code_fc=None):", "(*W*H) energy = torch.bmm(proj_query, proj_key) # transpose check attention = self.softmax(energy) # BX", "feats ratio # alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\"", "attn_maps = None, None, None if self.out_feats: feats = [] if self.out_shapes: shapes", "in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or code_cond if skip_feats is", "cond.shape) # Condition + FiLM(Feat_img) -> code_cond # _ denotes out_feats of ecnoder_c", "output feats shape for inverse self.use_VAE = use_VAE # produce distribution for code", "range(self.num_down): # double channels after reduce spatial size nc_factor = 2 ** (i", "= 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers = nn.ModuleList(attn_layers) def", "opt.im_nc # Image channel size, commonly 3 self.opt = opt # use FiLM", "zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds): # shift rel_feats_ratio, alpha to around 0 for visualization", "and shiftting, just keep the original naming \"\"\" def __init__(self, base_nc, num_feats, double=False,", "bottleneck_factor, kernel_size=1) self.key_conv = 
nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim,", "after reduce spatial size nc_factor = 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc *", "= self.base_nc * (2 ** self.num_down) self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 **", "(len(size) == 4) N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) +", "(2 ** self.num_up)) up_block = [] for i in range(self.num_up): nc_factor = 2", "= self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: return fake_img, rel_feats_list, alpha_beta_list else:", "# output feats self.out_shapes = out_shapes # output feats shape for inverse self.use_VAE", "= self.reparameterize(mu, logvar) out = out.view() else: out = self.tail_block(x) return out, feats,", "out_feats # output feats self.out_shapes = out_shapes # output feats shape for inverse", "self.in_shapes: if in_shapes is not None: assert len(in_shapes) == self.num_up, \"in_shapes number mismatch", "use FiLM or Cond-Attn code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc", "self.num_up\" else: raise ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas)", "= [] x = self.head_block(code) for i in range(self.num_up): if self.input_FiLM: x =", "rel_feats_diff = [] # use for beta(bias of FiLM) for f_c, f_c_a in", "* 2)) self.down_block = nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block", "N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std =", "= [] for i in range(self.num_up): nc_factor = 2 ** (self.num_up - i)", "height) # B X C x (*W*H) energy = torch.bmm(proj_query, proj_key) # transpose", "forward(self, feats): assert len(feats) == self.num_feats params = [] for i in range(self.num_feats):", "std.expand_as(x) 
return alpha * ((x - mean) / std) + beta def reparameterize(self,", "std = self.calc_mean_std(x) mean = mean.expand_as(x) std = std.expand_as(x) return alpha * ((x", "as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc, input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn, out_feats=True, out_shapes=False) self.decoder_i", "UpSample # print(x.shape, in_shapes[self.num_up-i-1]) # torch.Size([1, 64, 6, 10]) torch.Size([6, 10]) # torch.Size([1,", "self.base_nc * (2 ** self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c = Encoder(self.num_down,", "code_in # the flattened feat_cond(smallest) length self.vae_tail_fc_nc = code_fc # mu and logvar", "out = self.gamma * out + x return out, attention class FiLM(BaseNetwork): \"\"\"", "1) return feat_mean, feat_std def affine_transformation(self, X, alpha, beta): x = X.clone() mean,", "zip(feats_cond, feats_cond_aug): rel_feats_diff.append(torch.add(f_c_a, -f_c)) # feats_cond_aug - feats_cond_aug # cond2img in Decoder: apply", "class Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature from encoder parmas: num_up, base_nc,", "out, attention class FiLM(BaseNetwork): \"\"\" FiLMModule(Semantic Feature Translation layer) Our version seperates the", "not in Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, \"FiLM_alphas and", "# input: cond or prev_img if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down,", "feats between cond and cond_aug rel_feats_ratio = [] # use for alpha(multiplier of", "len(q_feats) == self.num_down, \"k_feats and q_feats mismatch\" feats, shapes, attn_maps = None, None,", "x = self.down_block[i](x) if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x,", "= None # code_img + FiLM(Feat_cond) -> Fake_img # _ denotes out_feats of", "-f_c)) # feats_cond_aug - feats_cond_aug # cond2img in Decoder: apply FiLM alpha and", 
"range(self.num_up): # double channels after reduce spatial size nc_factor = 2 ** (self.num_up", "self.attn_layers[i](x, k_feats[i], q_feats[i]) # attn_maps.append(attn_map) if self.out_feats: # Out feat after DownSample and", "transformation self.out_feats = out_feats # output feats self.out_shapes = out_shapes # output feats", "double channels after reduce spatial size nc_factor = 2 ** (self.num_up - i)", "module may replace SFT module, but takes much more memory and brings a", "else: out = self.tail_block(x) return out, feats, shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder", "def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code: code_img or", "beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] =", "6, 10]) torch.Size([6, 10]) # torch.Size([1, 32, 12, 20]) torch.Size([12, 20]) # torch.Size([1,", "for alpha(multiplier of FiLM) for f_c, f_c_a in zip(feats_cond, feats_cond_aug): rel_feats_ratio.append(torch.div(f_c_a + 1e-14,", "Translation layer) Our version seperates the scaling and shiftting, just keep the original", "for i in range(self.num_feats): # attach FiLM source features to main graph instead", "def __init__(self, in_dim, bottleneck_factor=32): super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim", "** self.num_down), self.out_nc) if self.use_attn: attn_layers = [] for i in range(self.num_down): #", "channel, and half output channel # for concating skip feats from encoder #", "nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = nn.Parameter(torch.zeros(1))", "input channel, and half output 
channel # for concating skip feats from encoder", "Condition Signal(Segmentation map) and Image(+Noise) params: num_down, base_nc, out_nc return: [features] + [Code]", "= use_attn # use attention mechanism if self.use_VAE: self.vae_tail_fc_in = code_in # the", "UpSample/Concat and after FiLM/Attention feats.append(x) if skip_feats is not None: # merge skip", "X C X W X H) q : aug cond feature maps( B", "length # Similar to InGAN, increase kernel_size of entry block to 7 self.head_block", "size back to 3 self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers", "code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc * (2 ** self.num_down) self.encoder_c", "out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.value_conv =", "nc, kernel_size=1))] main = nn.Sequential(*block_list) return main def forward(self, feats): assert len(feats) ==", "+ beta def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None): # code:", "maps bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2)) self.bottlenecks = torch.nn.ModuleList(bottlenecks) def", "None, \"FiLM_betas\": None, \"k_feats\": None, \"q_feats\": None} if self.opt.D_use_FiLM: alpha_conds.reverse() beta_conds.reverse() D_param_dict[\"FiLM_alphas\"], D_param_dict[\"FiLM_betas\"]", "[features] + [Code] \"\"\" def __init__(self, num_down, base_nc, in_nc, out_nc, input_FiLM=False, out_feats=False, out_shapes=False,", "= nn.ModuleList(down_block) if self.use_VAE: self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc) else: self.tail_block = ConvBaseBlock(self.base_nc *", "of tail block of decoder to 7 # Due to blurry edges, reduce", "skip feats before UpSample skip_feat = skip_feats[self.num_up - i - 1] if self.input_FiLM:", "rel_feats_ratio, alpha to 
around 0 for visualization rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() -", "base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM # whether input", "num_down self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM = input_FiLM", "self.out_feats = out_feats # whether output decoder features self.in_shapes = in_shapes # whether", "self.double: block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)] else: block_list = []", "up_block = [] for i in range(self.num_up): nc_factor = 2 ** (self.num_up -", "= self.FiLM_c2i_alpha(rel_feats_ratio) beta_conds = self.FiLM_c2i_beta(rel_feats_diff) rel_feats_list = [] # for visualization alpha_beta_list =", "bottleneck depth # while Guided-pix2pix use fixed 100 across all feature maps bottlenecks.append(self.bottleneck_layer(base_nc", "self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM: self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True)", "self.input_FiLM = input_FiLM # whether input FiLMed factors self.out_feats = out_feats # whether", "= [] for i in range(self.num_feats): # attach FiLM source features to main", "self.num_down = num_down self.base_nc = base_nc self.in_nc = in_nc self.out_nc = out_nc self.input_FiLM", "spatial size nc_factor = 2 ** (self.num_up - i) attn_layers.append(Cond_Attn(self.base_nc * nc_factor)) self.attn_layers", "self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc, skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM, use_attn=self.opt.D_use_attn) if self.opt.E_use_FiLM or self.opt.D_use_FiLM:", "reparameterize(self, mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu)", "layer num # self.neck_depth = neck_depth # FiLM layer bottleneck depth self.base_nc =", "\"in_shapes number mismatch self.num_up\" else: raise 
ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert", "super(Cond_Attn, self).__init__() self.chanel_in = in_dim self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1) self.key_conv", "shapes, attn_maps class Decoder(BaseNetwork): \"\"\" Decoder for Image input: feature from encoder parmas:", "self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1) if self.use_attn: attn_layers = [] for i", "input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg) output: fake_image(tgt_img) also output FiLM parameters(alpha, beta) for", "else: raise ValueError(\"in_shapes not in Input\") if self.input_FiLM: assert len(FiLM_alphas) == len(FiLM_betas) ==", "mu, logvar): if self.training: std = logvar.mul(0.5).exp_() eps = Vb(std.data.new(std.size()).normal_()) return eps.mul(std).add_(mu) else:", "# for concating skip feats from encoder # torch.cat(feat, skip_feat) -> feat_next #", "# Output feature shape before DownSample shapes.append(x.shape[-2:]) x = self.down_block[i](x) if self.input_FiLM: x", "visualization \"\"\" def __init__(self, opt): super(SemIAGenerator, self).__init__() self.num_down = opt.num_down # Encoder feat", "feats from encoder # torch.cat(feat, skip_feat) -> feat_next # 256 -> 64, 128", "attention value + input feature attention: B X N X N (N is", "= alpha_conds, beta_conds if self.opt.D_use_attn: feats_cond.reverse() feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug fake_img,", "# output feats shape for inverse self.use_VAE = use_VAE # produce distribution for", "down_block = [] for i in range(self.num_down): # double channels after reduce spatial", "* nc_factor * 2, int(self.base_nc * nc_factor // 2))) else: up_block.append(UpConvBlock(self.base_nc * nc_factor,", "torch.bmm(proj_query, proj_key) # transpose check attention = self.softmax(energy) # BX (N) X (N)", "\"FiLM_alphas and FiLM_betas mismatch\" if 
self.use_attn: assert len(k_feats) == len(q_feats) == self.num_down, \"k_feats", "z_prev instead of torch.cat(noise+prev, prev) as input self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc,", "= out.view(m_batchsize, C, width, height) out = self.gamma * out + x return", "small value added to the variance to avoid divide-by-zero. size = feat.size() assert", "in range(self.num_up): if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map", "nc_factor = 2 ** (i + 1) # use base_nc * nc_factor //", "do not store attn_maps # if self.use_attn: # attn_maps = [] if noise", "U-Net skip connections help little. \"\"\" def __init__(self, num_up, base_nc, in_nc, out_nc, input_FiLM=False,", "separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg)", "E_param_dict[\"FiLM_alphas\"], E_param_dict[\"FiLM_betas\"] = alpha_conds, beta_conds if self.opt.E_use_attn: E_param_dict[\"k_feats\"], E_param_dict[\"q_feats\"] = feats_cond, feats_cond_aug #", "use_attn # use attention mechanism # Decoder's head block out_nc = Encoder's tail", "channel size for conv layers self.cond_nc = opt.cond_nc # Condition channel size, 3", "= self.tail_block(x) return out, feats class Cond_Attn(nn.Module): \"\"\" Cond-Attention Module Attetion module may", "= opt.cond_nc # Condition channel size, 3 for seg self.im_nc = opt.im_nc #", "\"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) == self.num_up, \"k_feats", "of diff/ratio bottlenecks = [] for i in range(num_feats): # nc_factor nc_factor =", "# Noise + Prev_img -> Feat_img, code_img code_i, feats_img, _, attn_maps = self.encoder_i(x,", "pixel has W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B", "feats_cond_aug.reverse() D_param_dict[\"k_feats\"], D_param_dict[\"q_feats\"] = 
feats_cond, feats_cond_aug fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict) if", "W*H scores proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C", "q_feats=None): # code: code_img or code_cond if skip_feats is not None: assert len(skip_feats)", "if self.out_shapes: shapes = [] # do not store attn_maps # if self.use_attn:", "1e-14)) # feats_cond_aug / feats_cond rel_feats_diff = [] # use for beta(bias of", "if self.input_FiLM: x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i]) if self.use_attn: x, attn_map = self.attn_layers[i](x,", "== self.num_up, \"FiLM_alphas and FiLM_betas mismatch\" if self.use_attn: assert len(k_feats) == len(q_feats) ==", "def forward(self, feats): assert len(feats) == self.num_feats params = [] for i in", "alpha & beta separate params.append(self.bottlenecks[i](feats[i])) return params class SemIAGenerator(BaseNetwork): \"\"\" netG input: real", "num # self.neck_depth = neck_depth # FiLM layer bottleneck depth self.base_nc = opt.base_nc", "# if self.use_attn: # attn_maps = [] x = self.head_block(code) for i in", ": input feature maps( B X C X W X H) k :", "= torch.cat((input, noise), 1) x = self.head_block(input) for i in range(self.num_down): if self.out_shapes:" ]
[ "self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_config\":ci_service_config_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path,request) def ci_service_config_webpart(self, request,service_id): service=CIDeployService.objects.get(int(service_id)) vm_service=VM_CIDeployService(service,0)", "ci_service_project=ProjectCommonControllPageWorker.get_myproject_dropdown_list(self, request,service.Project) pagefileds = {\"service\":vm_service,\"ci_service_project\":ci_service_project} return self.get_webpart(pagefileds, CIServicePath.service_config_page) def get_ci_service_list_webpart(self, request,sub_nav_action): service_list_controll =", "from doraemon.ci.models import CIDeployService class CIServicePageWorker(CIPageWorker): ''' 项目页面生成器 ''' def __init__(self, request): '''", "{\"ci_service_listcontroll\":service_list_controll} return self.get_webpart(pagefileds, CIServicePath.service_list_webpart) def get_ci_service_list_controll(self, request,sub_nav_action): dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action) ci_services = self.get_ci_services(request,", "Devuser ''' from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker from doraemon.ci.viewmodels.ci_left_nav_bar import CIServiceLeftNavBar from doraemon.ci.viewmodels.ci_sub_nav_bar import", "from doraemon.ci.pagefactory.ci_template_path import CIServicePath from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker from business.ci.ci_service import CIService from", "self.pagemodel, CIServicePath.left_nav_template_path) def get_service_sub_navbar(self, request, dm_products, sub_nav_action): return self.get_sub_nav_bar(request, self.subpage_model, CIServicePath.sub_nav_template_path, sub_nav_action=sub_nav_action, products=dm_products)", "CIServicePageWorker(CIPageWorker): ''' 项目页面生成器 ''' def __init__(self, request): ''' Constructor 
''' CIPageWorker.__init__(self, request) self.pagemodel", "ci_service_config_webpart(self, request,service_id): service=CIDeployService.objects.get(int(service_id)) vm_service=VM_CIDeployService(service,0) ci_service_project=ProjectCommonControllPageWorker.get_myproject_dropdown_list(self, request,service.Project) pagefileds = {\"service\":vm_service,\"ci_service_project\":ci_service_project} return self.get_webpart(pagefileds, CIServicePath.service_config_page) def", "= self.get_ci_service_list_controll(request, sub_nav_action) pagefileds = {\"ci_service_listcontroll\":service_list_controll} return self.get_webpart(pagefileds, CIServicePath.service_list_webpart) def get_ci_service_list_controll(self, request,sub_nav_action): dm_ci_services", "sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action) ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_webpart\":ci_service_webpart}", "return self.get_webpart(pagefileds, CIServicePath.service_list_webpart) def get_ci_service_list_controll(self, request,sub_nav_action): dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action) ci_services = self.get_ci_services(request, dm_ci_services)", "''' Constructor ''' CIPageWorker.__init__(self, request) self.pagemodel = CIServiceLeftNavBar self.subpage_model = CIServiceSubNavBar def get_ci_service_fullpage(self,", "#coding=utf-8 ''' Created on 2015-9-24 @author: Devuser ''' from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker from", "self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_config\":ci_service_config_webpart}", "= CIService.get_product_ci_services(request,sub_nav_action) ci_services = 
self.get_ci_services(request, dm_ci_services) pagefileds = {\"ci_services\":ci_services} return self.get_webpart(pagefileds, CIServicePath.service_list_controll) def", "request,sub_nav_action): dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action) ci_services = self.get_ci_services(request, dm_ci_services) pagefileds = {\"ci_services\":ci_services} return self.get_webpart(pagefileds,", "= CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds", "request) def get_ci_service_config_page(self, request,service_id): dm_products = CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request,", "= self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar,", "CIPageWorker.__init__(self, request) self.pagemodel = CIServiceLeftNavBar self.subpage_model = CIServiceSubNavBar def get_ci_service_fullpage(self, request,sub_nav_action): dm_products =", "dm_ci_services) pagefileds = {\"ci_services\":ci_services} return self.get_webpart(pagefileds, CIServicePath.service_list_controll) def get_service_left_bar(self, request): return self.get_left_nav_bar(request, self.pagemodel,", "{\"service\":vm_service,\"ci_service_project\":ci_service_project} return self.get_webpart(pagefileds, CIServicePath.service_config_page) def get_ci_service_list_webpart(self, request,sub_nav_action): service_list_controll = self.get_ci_service_list_controll(request, sub_nav_action) pagefileds =", "CIServicePath.sub_nav_template_path, sub_nav_action=sub_nav_action, products=dm_products) def 
get_ci_services(self,request,dm_ci_services): result=list() for service in dm_ci_services: temp=VM_CIDeployService(service,0) result.append(temp) return", "= {\"ci_service_listcontroll\":service_list_controll} return self.get_webpart(pagefileds, CIServicePath.service_list_webpart) def get_ci_service_list_controll(self, request,sub_nav_action): dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action) ci_services =", "from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService from doraemon.ci.pagefactory.ci_template_path import CIServicePath from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker from", "request,service_id): dm_products = CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart =", "CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds =", "import CIServiceLeftNavBar from doraemon.ci.viewmodels.ci_sub_nav_bar import CIServiceSubNavBar from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService from doraemon.ci.pagefactory.ci_template_path import", "= self.get_service_sub_navbar(request, dm_products, sub_nav_action) ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_webpart\":ci_service_webpart} return", "get_ci_service_fullpage(self, request,sub_nav_action): dm_products = CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action)", "self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, 
dm_products, sub_nav_action) ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar,", "page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_config\":ci_service_config_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path,request) def ci_service_config_webpart(self, request,service_id): service=CIDeployService.objects.get(int(service_id)) vm_service=VM_CIDeployService(service,0) ci_service_project=ProjectCommonControllPageWorker.get_myproject_dropdown_list(self,", "left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar,", "return self.get_webpart(pagefileds, CIServicePath.service_config_page) def get_ci_service_list_webpart(self, request,sub_nav_action): service_list_controll = self.get_ci_service_list_controll(request, sub_nav_action) pagefileds = {\"ci_service_listcontroll\":service_list_controll}", "sub_nav_action) ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_webpart\":ci_service_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path, request) def", "self.pagemodel = CIServiceLeftNavBar self.subpage_model = CIServiceSubNavBar def get_ci_service_fullpage(self, request,sub_nav_action): dm_products = CIService.get_products_include_me(request) left_nav_bar", "CIServicePath.service_list_webpart) def get_ci_service_list_controll(self, request,sub_nav_action): dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action) ci_services = self.get_ci_services(request, dm_ci_services) pagefileds =", "self.get_webpart(pagefileds, CIServicePath.service_list_controll) def 
get_service_left_bar(self, request): return self.get_left_nav_bar(request, self.pagemodel, CIServicePath.left_nav_template_path) def get_service_sub_navbar(self, request, dm_products,", "self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_webpart\":ci_service_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path, request) def get_ci_service_config_page(self, request,service_id): dm_products", "Constructor ''' CIPageWorker.__init__(self, request) self.pagemodel = CIServiceLeftNavBar self.subpage_model = CIServiceSubNavBar def get_ci_service_fullpage(self, request,sub_nav_action):", "request,sub_nav_action): service_list_controll = self.get_ci_service_list_controll(request, sub_nav_action) pagefileds = {\"ci_service_listcontroll\":service_list_controll} return self.get_webpart(pagefileds, CIServicePath.service_list_webpart) def get_ci_service_list_controll(self,", "from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker from business.ci.ci_service import CIService from doraemon.ci.models import CIDeployService class", "dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_config\":ci_service_config_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path,request) def ci_service_config_webpart(self,", "2015-9-24 @author: Devuser ''' from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker from doraemon.ci.viewmodels.ci_left_nav_bar import CIServiceLeftNavBar from", "sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_config\":ci_service_config_webpart} return", "CIServicePath from 
doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker from business.ci.ci_service import CIService from doraemon.ci.models import CIDeployService", "request,service_id): service=CIDeployService.objects.get(int(service_id)) vm_service=VM_CIDeployService(service,0) ci_service_project=ProjectCommonControllPageWorker.get_myproject_dropdown_list(self, request,service.Project) pagefileds = {\"service\":vm_service,\"ci_service_project\":ci_service_project} return self.get_webpart(pagefileds, CIServicePath.service_config_page) def get_ci_service_list_webpart(self,", "request,sub_nav_action): dm_products = CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action) ci_service_webpart", "pagefileds = {\"service\":vm_service,\"ci_service_project\":ci_service_project} return self.get_webpart(pagefileds, CIServicePath.service_config_page) def get_ci_service_list_webpart(self, request,sub_nav_action): service_list_controll = self.get_ci_service_list_controll(request, sub_nav_action)", "request): return self.get_left_nav_bar(request, self.pagemodel, CIServicePath.left_nav_template_path) def get_service_sub_navbar(self, request, dm_products, sub_nav_action): return self.get_sub_nav_bar(request, self.subpage_model,", "= CIServiceLeftNavBar self.subpage_model = CIServiceSubNavBar def get_ci_service_fullpage(self, request,sub_nav_action): dm_products = CIService.get_products_include_me(request) left_nav_bar =", "''' CIPageWorker.__init__(self, request) self.pagemodel = CIServiceLeftNavBar self.subpage_model = CIServiceSubNavBar def get_ci_service_fullpage(self, request,sub_nav_action): dm_products", "= CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action) ci_service_webpart = 
self.get_ci_service_list_webpart(request,sub_nav_action)", "= self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_webpart\":ci_service_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path, request) def get_ci_service_config_page(self, request,service_id):", "self.get_ci_services(request, dm_ci_services) pagefileds = {\"ci_services\":ci_services} return self.get_webpart(pagefileds, CIServicePath.service_list_controll) def get_service_left_bar(self, request): return self.get_left_nav_bar(request,", "CIServiceLeftNavBar from doraemon.ci.viewmodels.ci_sub_nav_bar import CIServiceSubNavBar from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService from doraemon.ci.pagefactory.ci_template_path import CIServicePath", "self.get_service_sub_navbar(request, dm_products,0) ci_service_config_webpart = self.ci_service_config_webpart(request,service_id) page_fileds = {\"left_nav_bar\":left_nav_bar, \"sub_nav_bar\":sub_nav_bar, \"ci_service_config\":ci_service_config_webpart} return self.get_page(page_fileds,CIServicePath.service_index_path,request) def", "left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action) ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action) page_fileds =", "get_service_left_bar(self, request): return self.get_left_nav_bar(request, self.pagemodel, CIServicePath.left_nav_template_path) def get_service_sub_navbar(self, request, dm_products, sub_nav_action): return self.get_sub_nav_bar(request,", "CIServiceSubNavBar def get_ci_service_fullpage(self, request,sub_nav_action): dm_products = CIService.get_products_include_me(request) left_nav_bar = self.get_service_left_bar(request) sub_nav_bar = self.get_service_sub_navbar(request,", "from doraemon.ci.viewmodels.ci_sub_nav_bar import CIServiceSubNavBar from 
'''
Created on 2015-9-24

@author: Devuser
'''
from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker
from doraemon.ci.viewmodels.ci_left_nav_bar import CIServiceLeftNavBar
from doraemon.ci.viewmodels.ci_sub_nav_bar import CIServiceSubNavBar
from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService
from doraemon.ci.pagefactory.ci_template_path import CIServicePath
from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker
from business.ci.ci_service import CIService
from doraemon.ci.models import CIDeployService


class CIServicePageWorker(CIPageWorker):
    """Page worker that assembles the CI service pages (project page generator).

    Composes full pages and web parts out of the templates referenced by
    CIServicePath, using the left/sub navigation view models.
    """

    def __init__(self, request):
        """Initialise the base worker and bind the navigation view models."""
        CIPageWorker.__init__(self, request)
        self.pagemodel = CIServiceLeftNavBar
        self.subpage_model = CIServiceSubNavBar

    def get_ci_service_fullpage(self, request, sub_nav_action):
        """Render the full CI service list page for *sub_nav_action*."""
        products = CIService.get_products_include_me(request)
        fields = {
            "left_nav_bar": self.get_service_left_bar(request),
            "sub_nav_bar": self.get_service_sub_navbar(request, products, sub_nav_action),
            "ci_service_webpart": self.get_ci_service_list_webpart(request, sub_nav_action),
        }
        return self.get_page(fields, CIServicePath.service_index_path, request)

    def get_ci_service_config_page(self, request, service_id):
        """Render the configuration page for one CI deploy service."""
        products = CIService.get_products_include_me(request)
        fields = {
            "left_nav_bar": self.get_service_left_bar(request),
            # 0 selects the default sub-nav entry, as in the list page's absence
            "sub_nav_bar": self.get_service_sub_navbar(request, products, 0),
            "ci_service_config": self.ci_service_config_webpart(request, service_id),
        }
        return self.get_page(fields, CIServicePath.service_index_path, request)

    def ci_service_config_webpart(self, request, service_id):
        """Build the configuration web part for the service with *service_id*."""
        service = CIDeployService.objects.get(int(service_id))
        fields = {
            "service": VM_CIDeployService(service, 0),
            "ci_service_project": ProjectCommonControllPageWorker.get_myproject_dropdown_list(
                self, request, service.Project),
        }
        return self.get_webpart(fields, CIServicePath.service_config_page)

    def get_ci_service_list_webpart(self, request, sub_nav_action):
        """Wrap the service-list control in its web part template."""
        control = self.get_ci_service_list_controll(request, sub_nav_action)
        return self.get_webpart(
            {"ci_service_listcontroll": control}, CIServicePath.service_list_webpart)

    def get_ci_service_list_controll(self, request, sub_nav_action):
        """Build the list control holding the CI service view models."""
        dm_services = CIService.get_product_ci_services(request, sub_nav_action)
        view_models = self.get_ci_services(request, dm_services)
        return self.get_webpart(
            {"ci_services": view_models}, CIServicePath.service_list_controll)

    def get_service_left_bar(self, request):
        """Return the rendered left navigation bar for CI service pages."""
        return self.get_left_nav_bar(
            request, self.pagemodel, CIServicePath.left_nav_template_path)

    def get_service_sub_navbar(self, request, dm_products, sub_nav_action):
        """Return the sub navigation bar for *dm_products*, highlighting *sub_nav_action*."""
        return self.get_sub_nav_bar(
            request, self.subpage_model, CIServicePath.sub_nav_template_path,
            sub_nav_action=sub_nav_action, products=dm_products)

    def get_ci_services(self, request, dm_ci_services):
        """Wrap each domain service object in a VM_CIDeployService view model."""
        return [VM_CIDeployService(service, 0) for service in dm_ci_services]
import json

import jwt
from app.models.bucketlist_models import Users
from config import Config
from datetime import datetime, timedelta
from flask import jsonify, request
from flask_restful import abort, Resource


class Index(Resource):
    """Landing resource that describes how to use the Bucketlist API."""

    def get(self):
        # Fix: the original adjacent string literals concatenated with no
        # separators, yielding garbled text ("...APIRegister a new User
        # bysending...", "...toPOST auth/login..."). Separators added.
        return({"message": "Welcome to the Bucketlist API. "
                           "Register a new User by "
                           "sending a POST request to auth/register. "
                           "Login by sending a POST request to "
                           "auth/login to get started."})


class Login(Resource):
    """Authenticates a user and issues a short-lived JWT."""

    def get(self):
        # Fix: missing separator ("To login,send a POST request...").
        return jsonify({"message": "To login, "
                                   "send a POST request to /auth/login"})

    def post(self):
        """Validate credentials from the JSON request body.

        Returns a welcome message plus a JWT valid for 30 minutes, or
        aborts with HTTP 400 on missing/invalid credentials.
        """
        data = json.loads(request.get_data(as_text=True))
        if not data:
            abort(
                400,
                message="No parameters passed. Please fill all fields")
        else:
            # Fix: data['username'] raised KeyError (-> HTTP 500) when the
            # key was absent; .get() lets the intended 400 path fire.
            username = data.get('username')
            password = data.get('password')
            if not username or not password:
                abort(400, message="Kindly fill in the missing details")
            user = Users.query.filter_by(username=username).first()
            if not user:
                abort(400, message="User does not exist")
            if user.check_password(password):
                payload = {
                    'sub': user.user_id,
                    # 30-minute expiry; utcnow() keeps the exp claim in UTC
                    'exp': datetime.utcnow() + timedelta(minutes=30)
                }
                token = jwt.encode(
                    payload, Config.SECRET_KEY, algorithm='HS256')
                # NOTE(review): .decode assumes PyJWT < 2.0 returns bytes;
                # PyJWT >= 2.0 returns str — confirm pinned version.
                return jsonify({"message": "Welcome {}".format(user.username),
                                "token": token.decode('utf-8')})
            else:
                abort(400, message="Invalid password")
Please fill all fields\") else: username", "datetime.utcnow() + timedelta(minutes=30) } token = jwt.encode( payload, Config.SECRET_KEY, algorithm='HS256') return jsonify({\"message\": \"Welcome", "all fields\") else: username = data['username'] password = data['password'] if not username or", "<filename>app/auth/login.py import json import jwt from app.models.bucketlist_models import Users from config import Config", "not user: abort(400, message=\"User does not exist\") if user.check_password(password): payload = { 'sub':", "abort, Resource class Index(Resource): def get(self): return({\"message\": \"Welcome to the Bucketlist API\" \"Register", "to /auth/login\"}) def post(self): data = json.loads(request.get_data(as_text=True)) if not data: abort( 400, message=\"No", "message=\"User does not exist\") if user.check_password(password): payload = { 'sub': user.user_id, 'exp': datetime.utcnow()", "message=\"Kindly fill in the missing details\") user = Users.query.filter_by(username=username).first() if not user: abort(400,", "import Config from datetime import datetime, timedelta from flask import jsonify, request from", "def post(self): data = json.loads(request.get_data(as_text=True)) if not data: abort( 400, message=\"No parameters passed.", "datetime, timedelta from flask import jsonify, request from flask_restful import abort, Resource class", "datetime import datetime, timedelta from flask import jsonify, request from flask_restful import abort,", "the missing details\") user = Users.query.filter_by(username=username).first() if not user: abort(400, message=\"User does not", "by sending a post request to\" \"POST auth/login to get started\"}) class Login(Resource):", "\"To login,\" \"send a POST request to /auth/login\"}) def post(self): data = json.loads(request.get_data(as_text=True))", "def get(self): return({\"message\": \"Welcome to the Bucketlist API\" \"Register a new User by\"" ]
[ "AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, #", "from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin):", "type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description,", "is auto generated from troposphere_mate.code_generator.__init__.py scripts. \"\"\" import sys if sys.version_info.major >= 3", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption,", "# type: int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs):", "EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type:", "Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, )", "AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, #", "# type: str template=None, # type: Template validation=True, # type: bool FlowArn=REQUIRED, #", "# type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str,", "bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type:", "AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from 
troposphere_mate.core.sentiel import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig,", "type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn]", "type: str template=None, # type: Template validation=True, # type: bool FlowArn=REQUIRED, # type:", "type: Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, #", "# type: int RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING,", "# type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING,", "template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs)", "AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, #", "Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, #", "type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn]", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption,", "FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int", "title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, 
self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement,", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn,", "import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING", "title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, #", "Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING,", "# type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn]", "FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type: str template=None, # type: Template validation=True,", "Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs", "# type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn]", "Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, # type: int RemoteId=NOTHING, # type: Union[str,", "title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId,", "AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] 
FlowArn=NOTHING, # type:", "Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED,", "= preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency,", "# type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs(", "Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self,", "type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING,", "RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED,", "= preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self,", ") super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption", "_Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, #", "Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type:", "preprocess_init_kwargs( title=title, 
VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title,", "import troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source", "5: # pragma: no cover from typing import Union, List, Any import troposphere.mediaconnect", "int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str,", "IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency,", "# type: Union[str, AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING,", "Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title,", "# type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str,", "AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn,", "# type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn]", ") super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type: str template=None,", "# -*- coding: utf-8 -*- \"\"\" This code is auto 
generated from troposphere_mate.code_generator.__init__.py", "Template validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type:", "StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title,", "Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str,", "# type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs):", "**kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type: str", ") from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel", "type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs", "type: Template validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, #", "# type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn]", "# type: str template=None, # type: Template validation=True, # type: bool Name=REQUIRED, #", "Mixin): def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn]", "_VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination,", "template=None, # type: Template validation=True, # type: bool 
FlowArn=REQUIRED, # type: Union[str, AWSHelperFn]", "self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type: str template=None, # type:", "and sys.version_info.minor >= 5: # pragma: no cover from typing import Union, List,", "AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation,", "AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type:", "Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs):", "Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, #", "type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Name=NOTHING, # type:", "troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source as", "AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type: Union[str,", "# type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn]", "type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn]", "int State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State,", "KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def", "title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING, # type: 
Union[str, AWSHelperFn] **kwargs): processed_kwargs =", ") super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type: str template=None,", "RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self,", "preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port,", "**kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type: str", "super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type: str template=None, #", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin):", "int MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, # type:", "type: Union[str, AWSHelperFn] Port=NOTHING, # type: int RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING,", "AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type:", "# type: Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED,", "processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, 
SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow,", "Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption,", "int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs =", "MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource,", "str template=None, # type: Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str,", "CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs )", "int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, #", "**kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type: str", "# type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str,", "# type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str,", "AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # type:", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn,", "# type: Union[str, AWSHelperFn] Port=NOTHING, # type: int 
RemoteId=NOTHING, # type: Union[str, AWSHelperFn]", "FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type: str template=None, # type: Template validation=True,", "type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING,", "FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin):", "Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type:", "AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str,", "str template=None, # type: Template validation=True, # type: bool Name=REQUIRED, # type: Union[str,", "AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, #", "def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn]", "Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type: Union[str,", "type: str template=None, # type: Template validation=True, # type: bool Description=REQUIRED, # type:", "validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def", "Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] 
Decryption=NOTHING, # type:", "Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type:", "# type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str,", "# type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING,", "int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, #", "SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING,", "= preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs", "AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs", "Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, #", "sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover from typing", "type: Union[str, AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, #", "# type: Template validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED,", "KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type:", "Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self,", "type: 
Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING,", "3 and sys.version_info.minor >= 5: # pragma: no cover from typing import Union,", "WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type:", "Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn]", "bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, #", "type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING,", "FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment,", "FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class", "AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs", "Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, #", "def __init__(self, title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs):", "SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source,", "Union[str, 
AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs):", "import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self,", "WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type:", "troposphere_mate.core.sentiel import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, # type:", "Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs)", "AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int", "Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type:", "type: int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs", "EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description,", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption,", "FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type:", "RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: 
int StreamId=NOTHING, # type: Union[str,", "bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING, #", "str template=None, # type: Template validation=True, # type: bool Description=REQUIRED, # type: Union[str,", "processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol,", "Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self,", "# type: int State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING,", "processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus,", "type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn]", "Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type:", "sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover", "AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, #", "type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: 
Union[str, AWSHelperFn]", "type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig,", "coding: utf-8 -*- \"\"\" This code is auto generated from troposphere_mate.code_generator.__init__.py scripts. \"\"\"", "Union[str, AWSHelperFn] Port=NOTHING, # type: int RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, #", "VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs =", "int RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, # type:", "preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class", "AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type:", "super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type: str template=None, #", "FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type:", "Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs(", "DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin):", "Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] Encryption=NOTHING, 
# type: _Encryption MaxLatency=NOTHING, #", "troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment", "bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, #", "__init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING,", "Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type:", "# type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING,", "type: int State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow,", "__init__(self, title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs", "self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type: str template=None, # type:", "preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self,", "# type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str,", "Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type:", "**kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, # type:", "AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] 
Encryption=NOTHING, #", "as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate", "DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, # type:", "SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type:", "Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation,", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs )", "**kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type:", "type: str template=None, # type: Template validation=True, # type: bool Name=REQUIRED, # type:", "type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn]", "super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type: str template=None, #", "type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING,", "Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs(", "type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn]", 
"Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def", "type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]]", "# type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs)", "# type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str,", "type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, #", "type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn]", "# type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn]", "SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs =", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput,", "type: Template validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, #", "type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int 
MaxBitrate=NOTHING,", "Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin):", "SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs =", "__init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName,", "Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def", "title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs", "from troposphere_mate.core.sentiel import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, #", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name,", "RecoveryWindow=NOTHING, # type: int State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs(", "Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class", "template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin):", 
"ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None,", "Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING,", "# type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption,", "# type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str,", "# pragma: no cover from typing import Union, List, Any import troposphere.mediaconnect from", "preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None,", "title, # type: str template=None, # type: Template validation=True, # type: bool Name=REQUIRED,", "type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, #", "Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig", "VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type:", "self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type: str template=None, # type:", "Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class 
FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING,", "Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin):", "class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING, # type:", "AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn,", "type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn]", ") super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type: str template=None,", "title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement,", "template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName,", "MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, # type: int", "Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED,", "# type: Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] 
Encryption=NOTHING, # type: _Encryption", "type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template,", "type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs", "Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def", "import REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, # type: int", "validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str,", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment,", "validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str,", "code is auto generated from troposphere_mate.code_generator.__init__.py scripts. 
\"\"\" import sys if sys.version_info.major >=", "WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description,", "AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, #", "AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type:", "template=None, # type: Template validation=True, # type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn]", "_FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn", "REQUIRED, NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING,", "preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId,", "Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type:", "AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, #", "MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn]", "= preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs ) super(Flow, self).__init__(**processed_kwargs)", "if sys.version_info.major >= 3 and 
sys.version_info.minor >= 5: # pragma: no cover from", "FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import", "Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template,", "( Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment,", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig,", "SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn,", "VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs =", "as _VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin", "validation=True, # type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source", "EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs)", "# type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str,", "type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn]", "IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Name=NOTHING,", "template=None, # type: Template validation=True, # type: bool 
Description=REQUIRED, # type: Union[str, AWSHelperFn]", ") super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str,", "# type: int MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING,", "# type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList,", "troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED,", "from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import", "State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, #", "VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs )", "title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type:", "# type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str,", "MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class", "troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from troposphere_mate.core.sentiel import REQUIRED, NOTHING class 
FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def", "Union[str, AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, # type:", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort,", "-*- \"\"\" This code is auto generated from troposphere_mate.code_generator.__init__.py scripts. \"\"\" import sys", "FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING, # type: Union[str,", "type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName,", "AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description,", "template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency,", "AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, #", "# type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn]", "type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn] 
VpcInterfaceAttachment=NOTHING,", "# type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str,", "Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING,", ") super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type: str template=None,", "self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs):", "**kwargs ) super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type: str", "**kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type: str", "_VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin from", "super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type: str template=None, #", "Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource,", "class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type: str template=None, # type: Template", "import Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as _Encryption,", "# type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # 
type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType,", "from troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source as _Source,", "# type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int", "self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, #", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name,", "VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, #", "VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import preprocess_init_kwargs,", "# type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Name=NOTHING, #", "List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type:", "SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment", "NOTHING class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin): def __init__(self, title=None, RecoveryWindow=NOTHING, # type: int State=NOTHING, #", ">= 3 and 
sys.version_info.minor >= 5: # pragma: no cover from typing import", "Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type: Union[str,", "AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, #", "type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED,", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region,", "MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow,", "SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title,", "Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs):", "processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate,", "processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def", "type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED,", 
"# type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING,", "AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs(", "from typing import Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption", "as _Encryption, FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from", ") super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str,", "AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, #", "validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr,", "processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency,", "title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption,", "State=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs", "type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING,", 
">= 5: # pragma: no cover from typing import Union, List, Any import", "type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn]", "Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type: str template=None, # type: Template validation=True,", "AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, #", "= preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs", "MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str,", "int MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type:", "Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED,", "Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING, #", "_Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, # type:", "SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type:", "# type: Template validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED,", "MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def", 
"Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, #", "# type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption", "_Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, #", "-*- coding: utf-8 -*- \"\"\" This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.", "MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str,", "Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self,", "_FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone, SourceFailoverConfig=SourceFailoverConfig, **kwargs", "scripts. 
\"\"\" import sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5: #", "RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class", ") super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type: str template=None,", "Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # type: _Encryption", "Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr,", "type: int MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, #", "# type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str,", "IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs)", "Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING,", "Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] Encryption=NOTHING, # type:", "Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, 
self).__init__(**processed_kwargs) class", "IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING,", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn,", "cover from typing import Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect import (", "Name=NOTHING, # type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type:", "type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn]", "# type: str template=None, # type: Template validation=True, # type: bool Description=REQUIRED, #", "Mixin): def __init__(self, title, # type: str template=None, # type: Template validation=True, #", "Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs", "AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs", "Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING,", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort,", "class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs", "type: int 
Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, # type: int RemoteId=NOTHING, #", "Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING,", "int Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, # type: int RemoteId=NOTHING, # type:", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class", "preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol,", "title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId,", "from troposphere_mate.code_generator.__init__.py scripts. 
\"\"\" import sys if sys.version_info.major >= 3 and sys.version_info.minor >=", "type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type:", "processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn, SecurityGroupIds=SecurityGroupIds, SubnetId=SubnetId, **kwargs )", "def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None,", "class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, #", "List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] Encryption=NOTHING,", "processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin):", "DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn]", "IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, 
self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface,", "type: Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, #", "AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, #", "def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING,", "Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING,", "_Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation,", "# type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "This code is auto generated from troposphere_mate.code_generator.__init__.py scripts. 
\"\"\" import sys if sys.version_info.major", "type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING,", "Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs )", "Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput,", "= preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def", "Destination=NOTHING, # type: Union[str, AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type: int", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn, SecurityGroupIds=SecurityGroupIds,", "Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn, FlowArn=FlowArn, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs", "utf-8 -*- \"\"\" This code is auto generated from troposphere_mate.code_generator.__init__.py scripts. 
\"\"\" import", "self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type: str template=None, # type:", "validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str,", "AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation,", "type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, #", "type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn]", "# type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs(", "validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class", "type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]]", "sys.version_info.minor >= 5: # pragma: no cover from typing import Union, List, Any", "# type: Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Protocol=REQUIRED,", "validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str,", "Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm,", "type: _Encryption MaxLatency=NOTHING, # type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, #", "# type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] 
SecurityGroupIds=REQUIRED, # type: List[Union[str,", "IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source,", "Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment,", "<filename>troposphere_mate/mediaconnect.py # -*- coding: utf-8 -*- \"\"\" This code is auto generated from", "type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, #", "FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin): def __init__(self, title, # type: str template=None, # type: Template validation=True,", "type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn]", "type: _Encryption EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template,", "\"\"\" This code is auto generated from troposphere_mate.code_generator.__init__.py scripts. 
\"\"\" import sys if", "\"\"\" import sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma:", "Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig", "# type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str,", "# type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str,", "no cover from typing import Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect import", "AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, #", "# type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int", "Template validation=True, # type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, # type:", "processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url,", "ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, # type:", "_Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn from troposphere_mate.core.mate import", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name,", "= preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, 
Encryption=Encryption, MaxLatency=MaxLatency, Name=Name,", "# type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn]", "title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, #", "type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn]", "EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs )", "Template validation=True, # type: bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type:", "# type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn]", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, VpcInterfaceName=VpcInterfaceName, **kwargs ) super(VpcInterfaceAttachment, self).__init__(**processed_kwargs)", "# type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str,", "# type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str,", "Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING,", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp,", "CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type: Union[str, AWSHelperFn] Destination=NOTHING, # type:", "Any import 
troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig as _FailoverConfig,", "self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED,", "**kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type:", "# type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn]", "# type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str,", "class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type: str template=None, # type: Template", "pragma: no cover from typing import Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect", "RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type:", "title, # type: str template=None, # type: Template validation=True, # type: bool FlowArn=REQUIRED,", "# type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs(", "class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type: str template=None, # type: Template", "List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template,", "WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn,", "# type: int Name=NOTHING, # type: Union[str, AWSHelperFn] Port=NOTHING, # 
type: int RemoteId=NOTHING,", "MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Protocol=Protocol, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(FlowSource, self).__init__(**processed_kwargs) class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin):", "title, # type: str template=None, # type: Template validation=True, # type: bool Description=REQUIRED,", "type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING,", "EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type:", "typing import Union, List, Any import troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as", "super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn]", "VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Protocol=Protocol,", "type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]] SubnetId=REQUIRED, # type: Union[str, AWSHelperFn]", "Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING, # type: Union[str, AWSHelperFn] ResourceId=NOTHING,", "RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type:", "type: Union[str, AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn]", "StreamId=StreamId, VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, 
Mixin): def __init__(self, title, #", "AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, #", "# type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str,", "Name=REQUIRED, # type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type: Union[str,", "type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING,", "super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn]", "Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING, #", "type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name,", "# type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs):", "AWSHelperFn] Destination=NOTHING, # type: Union[str, AWSHelperFn] Encryption=NOTHING, # type: _Encryption MaxLatency=NOTHING, # type:", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId,", "type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING,", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn, SecurityGroupIds=SecurityGroupIds, 
SubnetId=SubnetId,", "# type: Template validation=True, # type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED,", "preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent, Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs )", "preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn, SecurityGroupIds=SecurityGroupIds, SubnetId=SubnetId, **kwargs ) super(FlowVpcInterface, self).__init__(**processed_kwargs)", "FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type:", "StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING, # type:", "Port=NOTHING, # type: int RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int", "type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]]", "Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation,", "type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str, AWSHelperFn] IngestPort=NOTHING, # type: int MaxBitrate=NOTHING,", "generated from troposphere_mate.code_generator.__init__.py scripts. 
\"\"\" import sys if sys.version_info.major >= 3 and sys.version_info.minor", "type: Union[str, AWSHelperFn] Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title,", "# type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING, #", "List, Any import troposphere.mediaconnect from troposphere.mediaconnect import ( Encryption as _Encryption, FailoverConfig as", "AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name,", "# type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Name=Name, Source=Source, AvailabilityZone=AvailabilityZone,", "type: Union[str, AWSHelperFn] Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING,", "Union[str, AWSHelperFn] StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn] WhitelistCidr=NOTHING,", "type: Template validation=True, # type: bool Name=REQUIRED, # type: Union[str, AWSHelperFn] Source=REQUIRED, #", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, Name=Name, Decryption=Decryption, EntitlementArn=EntitlementArn,", "import sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no", "SourceArn=SourceArn, StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self,", "AWSHelperFn] FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, #", "Mixin): def __init__(self, title=None, 
RecoveryWindow=NOTHING, # type: int State=NOTHING, # type: Union[str, AWSHelperFn]", "EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING, #", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class", "Url=NOTHING, # type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector,", "__init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, #", "MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type: Union[str, AWSHelperFn]", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn, SecurityGroupIds=SecurityGroupIds, SubnetId=SubnetId, **kwargs", "class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type: str template=None, # type: Template", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate,", "as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import Template,", "AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type:", "# type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING, # type: int Encryption=NOTHING, # 
type: _Encryption EntitlementStatus=NOTHING,", "import ( Encryption as _Encryption, FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as", "Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers,", "AWSHelperFn] Protocol=NOTHING, # type: Union[str, AWSHelperFn] SourceArn=NOTHING, # type: Union[str, AWSHelperFn] StreamId=NOTHING, #", "# type: Union[str, AWSHelperFn] Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str,", "preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs )", "Union[str, AWSHelperFn] ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING,", "FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type: str template=None, # type: Template validation=True,", "ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs) class Source(troposphere.mediaconnect.Source,", "type: int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type:", "__init__(self, title, # type: str template=None, # type: Template validation=True, # type: bool", "bool FlowArn=REQUIRED, # type: Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, #", "StreamId=StreamId, VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class 
Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title,", "= preprocess_init_kwargs( title=title, Decryption=Decryption, Description=Description, EntitlementArn=EntitlementArn, IngestIp=IngestIp, IngestPort=IngestPort, MaxBitrate=MaxBitrate, MaxLatency=MaxLatency, Name=Name, Protocol=Protocol, SourceArn=SourceArn,", "**kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn,", "Union[str, AWSHelperFn] DeviceId=NOTHING, # type: Union[str, AWSHelperFn] KeyType=NOTHING, # type: Union[str, AWSHelperFn] Region=NOTHING,", "Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId, SecretArn=SecretArn, Url=Url, **kwargs ) super(Encryption, self).__init__(**processed_kwargs)", "int StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs =", "title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs ) super(FailoverConfig, self).__init__(**processed_kwargs) class Encryption(troposphere.mediaconnect.Encryption, Mixin): def __init__(self, title=None,", "super(Flow, self).__init__(**processed_kwargs) class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin): def __init__(self, title, # type: str template=None, #", "class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, # type: str template=None, # type: Template", "StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment **kwargs): processed_kwargs = preprocess_init_kwargs(", "_Encryption, FailoverConfig as _FailoverConfig, Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere", "Template 
validation=True, # type: bool Description=REQUIRED, # type: Union[str, AWSHelperFn] FlowArn=REQUIRED, # type:", "Protocol=REQUIRED, # type: Union[str, AWSHelperFn] CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]] Description=NOTHING, # type:", "# type: Union[str, AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, RecoveryWindow=RecoveryWindow, State=State, **kwargs )", "= preprocess_init_kwargs( title=title, template=template, validation=validation, FlowArn=FlowArn, Name=Name, RoleArn=RoleArn, SecurityGroupIds=SecurityGroupIds, SubnetId=SubnetId, **kwargs ) super(FlowVpcInterface,", "troposphere_mate.code_generator.__init__.py scripts. \"\"\" import sys if sys.version_info.major >= 3 and sys.version_info.minor >= 5:", "VpcInterfaceName=VpcInterfaceName, WhitelistCidr=WhitelistCidr, **kwargs ) super(Source, self).__init__(**processed_kwargs) class Flow(troposphere.mediaconnect.Flow, Mixin): def __init__(self, title, #", "Union[str, AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]] DataTransferSubscriberFeePercent=NOTHING,", "class Source(troposphere.mediaconnect.Source, Mixin): def __init__(self, title=None, Decryption=NOTHING, # type: _Encryption Description=NOTHING, # type:", "int MaxBitrate=NOTHING, # type: int MaxLatency=NOTHING, # type: int Protocol=NOTHING, # type: Union[str,", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, Algorithm=Algorithm, RoleArn=RoleArn, ConstantInitializationVector=ConstantInitializationVector, DeviceId=DeviceId, KeyType=KeyType, Region=Region, ResourceId=ResourceId,", "Decryption=NOTHING, # type: _Encryption EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] FlowArn=NOTHING, # type: Union[str,", "Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, # type: Union[str, AWSHelperFn] VpcInterfaceAttachment=NOTHING, #", "AWSHelperFn] Port=NOTHING, # type: int 
RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type:", "self).__init__(**processed_kwargs) class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin): def __init__(self, title, # type: str template=None, # type:", "ResourceId=NOTHING, # type: Union[str, AWSHelperFn] SecretArn=NOTHING, # type: Union[str, AWSHelperFn] Url=NOTHING, # type:", "type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template,", "# type: Union[str, AWSHelperFn] Source=REQUIRED, # type: _Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn]", "_Source AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn] SourceFailoverConfig=NOTHING, # type: _FailoverConfig **kwargs): processed_kwargs =", "Source as _Source, VpcInterfaceAttachment as _VpcInterfaceAttachment, ) from troposphere import Template, AWSHelperFn from", "VpcInterfaceAttachment=VpcInterfaceAttachment, **kwargs ) super(FlowOutput, self).__init__(**processed_kwargs) class FlowSource(troposphere.mediaconnect.FlowSource, Mixin): def __init__(self, title, # type:", "Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, # type:", "Encryption=Encryption, EntitlementStatus=EntitlementStatus, **kwargs ) super(FlowEntitlement, self).__init__(**processed_kwargs) class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin): def __init__(self, title=None, VpcInterfaceName=NOTHING,", "AWSHelperFn] **kwargs): processed_kwargs = preprocess_init_kwargs( title=title, template=template, validation=validation, Description=Description, FlowArn=FlowArn, Name=Name, Subscribers=Subscribers, DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent,", "type: int RemoteId=NOTHING, # type: Union[str, AWSHelperFn] SmoothingLatency=NOTHING, # type: int StreamId=NOTHING, #", "def __init__(self, title, # type: 
str template=None, # type: Template validation=True, # type:", "Mixin): def __init__(self, title=None, Algorithm=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str,", "AWSHelperFn] Name=REQUIRED, # type: Union[str, AWSHelperFn] RoleArn=REQUIRED, # type: Union[str, AWSHelperFn] SecurityGroupIds=REQUIRED, #", "validation=validation, FlowArn=FlowArn, Protocol=Protocol, CidrAllowList=CidrAllowList, Description=Description, Destination=Destination, Encryption=Encryption, MaxLatency=MaxLatency, Name=Name, Port=Port, RemoteId=RemoteId, SmoothingLatency=SmoothingLatency, StreamId=StreamId,", "Description=NOTHING, # type: Union[str, AWSHelperFn] EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn] IngestIp=NOTHING, # type:", "auto generated from troposphere_mate.code_generator.__init__.py scripts. \"\"\" import sys if sys.version_info.major >= 3 and" ]
[ "assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\",", "= self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta", "self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record(", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self): with", "f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def", "assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg #", "= self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( 
pylark.CreateBitableTableReq(", "self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\",", "self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError)", "pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func", "in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock", "e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self):", "self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli =", "e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert 
e.value.code >", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(", "pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table", "test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) 
assert", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg # real", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq(", "pylark import pytest from tests.test_conf import app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field =", "pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\"", "self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\"", "assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( 
app_token=\"x\",", "as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e:", "test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e:", "def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) )", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self):", "from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\",", "def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "f\"{e}\" 
self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with", "self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) )", "def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable", "in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field", "def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq())", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert 
\"msg=failed\" in", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table", "= origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as", "= self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in", "test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as", "mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise", "f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in", "def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", 
table_id=\"x\", record_id=\"x\", ) )", "e.value.code > 0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\",", "f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with", "self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli =", "test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(", "as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self):", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", 
"pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self):", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with", "self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw request class", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\",", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\"", "as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self):", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "e: 
self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func =", "tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args,", "0 assert \"mock-raw-request-failed\" in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args,", "self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) )", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\"", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list", "origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e:", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with 
pytest.raises(pylark.PyLarkError)", "self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "# Code generated by lark_sdk_gen. DO NOT EDIT. import unittest import pylark import", "self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list =", "in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) )", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError)", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq(", "f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with", "e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\"", "def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type", "origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e:", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\",", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() )", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\"", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in", "e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "assert e.value.code > 
0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\",", "app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e:", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "__init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def", "pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as", "test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "> 0 def test_real_request_get_bitable_field_list(self): with 
pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", )", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self):", "def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type", "= self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record", "self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record", "in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args,", "> 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) )", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in", "e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in 
f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func =", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq(", "origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e:", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self):", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "> 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\",", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() )", 
"self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq()", "assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\",", "e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", )", "test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError)", "in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\"", "= origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", 
"table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self):", "self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field", "pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq())", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func", "f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with", "f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self):", "e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) )", "def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert", "test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( 
pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field =", "pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq())", "# mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args,", "e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq())", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq(", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\"", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with", "f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def", "self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError)", "origin_func def 
test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e:", "test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert", "as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self):", "self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e:", "in f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_table(self): with", "test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert", "e.value.code > 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\",", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.create_bitable_view(", "= self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq(", "e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func =", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "assert e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\",", "def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\"", "0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", )", "origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() )", "def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert", "origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() )", "e: 
self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field =", "self.module_cli.get_bitable_meta = origin_func # mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs):", "pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def", "self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\",", "0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) )", "# real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, 
self).__init__(*args, **kwargs) self.cli", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\",", "def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) )", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert", "e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func =", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self):", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert 
\"msg=failed\" in f\"{e}\" def", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with", "e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table(", "e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self):", "> 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) )", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field =", "e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self): with", "self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\"", "test_mock_get_token_batch_create_bitable_table(self): with 
pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def", "def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type", "self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\"", "self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field", "test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq()", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record(", "test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def 
test_mock_self_func_get_bitable_table_list(self): origin_func", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq(", "e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func =", "self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in", "e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins()", "in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\"", "msg=\"mock-raw-request-failed\" ) # mock get token class 
TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed,", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq()", "self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError)", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view =", "in f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock", "in f\"{e}\" def 
test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\"", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e:", "as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with", "def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq())", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in", "self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func", "raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, 
msg=\"mock-raw-request-failed\" ) # mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase):", "f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def", "in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\",", "self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\"", "> 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\",", "0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) )", "e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) )", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table =", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(", "assert \"mock-raw-request-failed\" in e.value.msg def 
test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\",", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record =", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq(", "table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq())", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field 
self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq())", "0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "= self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in", "in f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock", "as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError)", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\",", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field =", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq(", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def 
test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view", "test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert", "def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self):", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def", "self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\"", "0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self):", "in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) )", "test_mock_self_func_get_bitable_view_list(self): origin_func = 
self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\",", "self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field", "f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with", "origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with", "app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with 
pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert", "e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as", "in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock", "0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq())", "= app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError)", "e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" 
in f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func =", "self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\"", "0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", )", "e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\",", "as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert", "e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table(", "0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", )", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with", "self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = 
origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func", "pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\"", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table =", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self):", "pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\"", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table =", "def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq())", "assert e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( 
app_token=\"x\",", "in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\",", "view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\"", "assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def", "scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self,", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\",", "assert e.value.code > 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\",", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with", "pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq(", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError)", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) 
assert e.type is pylark.PyLarkError assert", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table(", "self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError)", "self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError)", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq(", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", )", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with", "f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\",", "= self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in", "is 
pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg # real request", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with", ") assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table =", "f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with", "e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "= self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert", "pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\"", "= app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self):", "is 
pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type", "self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq()", "raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli =", "test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\",", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "with 
pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self):", "test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "**kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable", "f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in", "as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError)", ") assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) 
) assert e.type is", "self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\"", "app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1,", ") assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert", "= self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in", "app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\")", "self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table", "pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" 
self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func =", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record =", "self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "= self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in", "f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with", "mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)", "class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in", "pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in 
f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table =", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(", "origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq(", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e:", "msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func", "test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with", "self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with 
pytest.raises(pylark.PyLarkError)", "field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\"", "e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", )", "in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\"", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\"", "in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): 
with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", )", "in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\"", "e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq())", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def", "origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\"", "= self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError)", "assert e.value.code > 
0 assert \"mock-raw-request-failed\" in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase):", "# mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs)", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(", "e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\",", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self):", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self): with", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", )", "assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\",", "test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def", "e.type is 
pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e:", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta =", "> 0 assert \"mock-raw-request-failed\" in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self,", "> 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", )", "e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self):", "= origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as", "self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self):", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\"", "import pylark import pytest from tests.test_conf import 
app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in", "test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert", "assert \"mock-raw-request-failed\" in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs):", "test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert", "as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def", "test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq()", "= mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock", "e: self.module_cli.batch_delete_bitable_table( 
pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq(", "self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert", "def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq())", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def", "self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func", "e: self.module_cli.update_bitable_record( 
pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq(", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError)", "__init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request", "TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli =", "def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self):", "self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq())", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\",", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "= self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as", "mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) )", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self):", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq(", "DO NOT EDIT. 
import unittest import pylark import pytest from tests.test_conf import app_all_permission,", "> 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record(", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\"", "e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def", "app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\",", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self):", "pylark.PyLarkError assert 
e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert", "test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with", "e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "= origin_func # mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed,", "real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli =", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\",", "self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list", "test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\",", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self):", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func", "pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self):", "def test_mock_self_func_update_bitable_record(self): origin_func = 
self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq())", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\",", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq(", "= self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in", "def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert", "f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def", "= origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self):", "as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "def 
test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self):", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg # real request class", "self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def", "self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\"", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e:", ") assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self):", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func", 
"e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e:", "def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) )", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq()", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError", "def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert", "in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) )", "e.type is pylark.PyLarkError assert e.value.code > 0 def 
test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e:", "pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table(", "\"msg=failed\" in f\"{e}\" # mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args,", "in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock", "self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table", "e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq())", "e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq())", "e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", 
) )", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\",", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func", "test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e:", "f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with", "as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self):", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self): with 
pytest.raises(pylark.PyLarkError) as", "origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\"", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\",", "def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self):", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e:", "in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", )", "test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def", "f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def", "e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", 
) ) assert", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as", "f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with", "= origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) )", "= origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as", "test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with", "mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\",", ") assert e.type 
is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in", "self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError)", "self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\"", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", )", "e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs)", "e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", )", "test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\",", "as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record_list(self):", "self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def 
test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table", "assert e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\",", "record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self):", "e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func =", "= origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as", "= origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", "test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is", "TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli =", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list 
self.module_cli.get_bitable_record_list =", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq(", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq(", "f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with", "test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self):", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with", "e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "__init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable def", "import app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\",", "app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert 
\"mock-raw-request-failed\"", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\",", "__init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token", "def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type", "in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", )", "= origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self):", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def 
test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\",", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func", "origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\"", "mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e:", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as", "self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table", "pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert", "\"msg=mock-failed\" in f\"{e}\" 
self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record =", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func", "as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e:", "as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func", "as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", "self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError)", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock mock self", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\",", "> 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\",", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field", "0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func", "self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record =", "def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self):", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record(", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\",", "def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table 
self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table(", "def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert", "def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\"", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record =", "code=1, msg=\"mock-raw-request-failed\" ) # mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs):", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\"", "as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw", "self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def 
test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record", "= self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in", "self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError)", "= self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in", "e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq())", "origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() )", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError)", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "def 
test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\"", "def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert", "self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert", "in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\"", "test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\"", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self):", 
"self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\"", "e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", )", "in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\"", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq(", "e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record(", "def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert", "self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq(", 
"mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func", "app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_table(self):", "test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self):", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self):", "self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\"", "generated by lark_sdk_gen. DO NOT EDIT. 
import unittest import pylark import pytest from", "in f\"{e}\" self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock", "super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError)", "as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func =", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record", "in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self):", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self):", "> 0 def 
test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", )", "test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\"", "test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view", ") assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list", "self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): 
origin_func = self.module_cli.get_bitable_table_list", "f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in", "test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\"", "app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert", "pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq())", "f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" #", "e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with", "in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock", 
"self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert", "self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError)", "def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table(", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with", "as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func", "= origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as", "as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "test_real_request_batch_create_bitable_table(self): 
with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e:", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\"", "self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError)", "= self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", )", "e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with", "test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def 
test_mock_get_token_update_bitable_field(self): with", "in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\"", "app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self):", "assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\",", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "assert e.value.code > 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\",", "def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", )", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table =", "tests.test_conf import app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\",", "self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" 
self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func", "f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def", "in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock", "> 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", )", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq())", "e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func =", "func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args,", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError)", "is pylark.PyLarkError 
assert e.value.code > 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq(", "test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert", "e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(", "e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq())", ") # mock get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args,", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\",", "table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "> 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( 
pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e:", "table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\",", "self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\" def", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError)", "as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table(", "as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): 
with", "def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) )", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in", "= self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert", "test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert", "def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock get", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self):", "as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\",", "e.value.code > 0 def 
test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq())", "e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func =", "f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def", "self.module_cli.get_bitable_view_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() )", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self):", "e.type is pylark.PyLarkError assert e.value.code > 
0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self):", "e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", )", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\",", "e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func =", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list =", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "in f\"{e}\" def 
test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\"", "mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\"", "= self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view =", "assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\",", "def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record", "test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def", "0 def 
test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) )", "assert e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\",", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq(", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock", "from tests.test_conf import app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise", "e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert", "as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock mock self func class", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq(", "app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert 
e.value.code", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq(", "e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\",", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_record(self):", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq(", "e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\",", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\"", "test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert", "e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self):", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in", "self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError)", "self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError)", "self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", ") assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def", "with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\",", "0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", )", ") assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record", "self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\"", "as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def", "e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\",", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" 
in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e:", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list =", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self):", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError", "test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert", "pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list =", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in", "e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq())", "self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with", "test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) )", "EDIT. 
import unittest import pylark import pytest from tests.test_conf import app_all_permission, app_no_permission from", "self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\",", "as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type is", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "= origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as", "f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) 
assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func #", "self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\",", "as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record", "self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "import pytest from tests.test_conf import app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args,", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "**kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with", "as e: self.module_cli.batch_delete_bitable_record( 
pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list =", "test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\"", "> 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", )", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock", "origin_func # mock raw request class 
TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args,", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\"", "as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func", "func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" )", "class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli", "as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func", "self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq(", "test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func", "self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record =", "self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "> 0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\",", "request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins()", "in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock", "assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\",", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( 
pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\",", "def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq())", "self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq(", "test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func =", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq(", "pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with", "= origin_func def 
test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", "test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e:", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record =", "as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert", "def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( 
pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is", "origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e:", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\"", "table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is", "self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in", "def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self):", "code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) #", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\",", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def 
test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert", "origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", "test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert", "as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq())", "self.cli = app_no_permission.ins() 
self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(", "def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type", "import mock_get_tenant_access_token_failed def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs):", "in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", )", "test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self): with", "origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e:", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq(", "class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func = 
self.module_cli.create_bitable_record self.module_cli.create_bitable_record =", "self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError)", "app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def", "pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError)", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\"", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def", "f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def", "record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self):", "\"mock-raw-request-failed\" in e.value.msg # real request class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, 
**kwargs): super(TestBitableSampleRealRequestFailed,", "= self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in", "in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\"", "pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert", "test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record =", "e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() )", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self):", "self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) 
as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record", "as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func", "self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field", "test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with", "= origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq(", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table =", "*args, **kwargs): super(TestBitableSampleMockGetTokenFailed, 
self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token =", "origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_record(self): with", "app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self):", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list", "self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in", "test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq()", "def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in", "as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def 
test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type", "0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", )", "self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record", "in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock", "*args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request =", "e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw request", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e:", "pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock mock self func", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError)", "self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError)", "= origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self):", "as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = 
mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record(", "self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\"", "def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with", "test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) ) assert", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func", "as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError)", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq(", "origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e:", "self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" 
self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq())", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record = origin_func", "e.value.code > 0 def test_real_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\",", "def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type", "self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func", ") ) assert e.type is pylark.PyLarkError 
assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with", "= self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list =", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\",", "self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\"", "self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self):", "in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\"", "e.type is pylark.PyLarkError assert 
e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self):", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(", "self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert", "= app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq(", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is", "e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase):", "f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(", "**kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with", "**kwargs): super(TestBitableSampleMockRawRequestFailed, 
self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(", "def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) )", "super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli", "e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert", "def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self):", "e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def test_mock_self_func_get_bitable_record_list(self): origin_func =", "self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record", "origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\"", "e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", )", "import unittest import pylark import pytest from tests.test_conf import app_all_permission, app_no_permission from tests.test_helper", "test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert", "> 0 def test_real_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\",", "table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\"", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view = origin_func def", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record =", "in e.value.msg def test_mock_raw_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( 
pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\",", "as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list = mock with", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\",", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def", "raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1,", "e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func =", "def test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type", "origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\"", "pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "origin_func def 
test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e:", "# mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs)", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq(", "e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", )", ") assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert", "f\"{e}\" self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with", "\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", )", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record", "> 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", )", "*args, **kwargs): 
super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self):", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() )", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\",", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\"", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record(", "func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins()", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self):", "def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) 
assert", "in f\"{e}\" def test_mock_get_token_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\"", "= self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in", "def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq())", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_table = origin_func", "e.value.msg def test_mock_raw_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) )", "test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with", "field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self):", "assert \"msg=failed\" in f\"{e}\" def 
test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\"", ") assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq(", "= mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed self.module_cli = self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as", "as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert", "in f\"{e}\" # mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs):", "pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func =", "test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", 
"record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\"", "test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq()", "test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert", "0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) )", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view =", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_table(self): with", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view", "in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock", "> 0 def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\",", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def 
test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field =", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def", "in f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock", "test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is", "test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert", "record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_record(self):", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError)", "as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase):", "e.value.code > 0 assert 
\"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self):", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(", "= origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", "self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError)", "e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", )", "self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq(", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert 
\"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self):", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record(", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self):", "in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", )", "as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_record = origin_func def", "e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func 
def test_mock_self_func_get_bitable_meta(self):", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=failed\"", "origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self):", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record_list(self): with", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as", "> 0 def test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) )", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() )", "mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli", "table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = 
self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta =", "def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq())", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self):", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view =", "= app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as", "get token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli =", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func", "as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_table = origin_func def 
test_mock_self_func_batch_create_bitable_table(self): origin_func", "unittest import pylark import pytest from tests.test_conf import app_all_permission, app_no_permission from tests.test_helper import", "origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\"", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record(", "self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field", "= self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\" in", "def test_real_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record( pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) )", "self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError)", "in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) )", "test_real_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is", "in 
f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\",", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq(", "origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\"", "f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func = self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with", "pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def", "as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError)", "assert e.value.code > 0 def 
test_real_request_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\",", "def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq())", "test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with", "origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e:", "self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "= mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list =", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record", "pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "def 
test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func =", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq())", "e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record = origin_func def test_mock_self_func_batch_delete_bitable_record(self): origin_func =", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_view_list = origin_func", "self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self): with", "token class TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins()", 
"self.module_cli.batch_delete_bitable_record self.module_cli.batch_delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 assert", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError", "e: self.module_cli.create_bitable_record( pylark.CreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\"", "pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "= self.cli.bitable def test_mock_get_token_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq()) assert \"msg=failed\" in f\"{e}\"", "f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def", "in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def 
test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock", "self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError)", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self): with", "= origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", ") assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\",", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as", "def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) )", "TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli =", "by lark_sdk_gen. DO NOT EDIT. 
import unittest import pylark import pytest from tests.test_conf", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\",", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(", "self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self):", "assert e.value.code > 0 def test_real_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\",", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "assert \"msg=failed\" in f\"{e}\" # mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self,", "f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def", "f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(", "as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record(", "origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e:", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record =", "test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.batch_delete_bitable_table(", "origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=mock-failed\"", "def test_mock_raw_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is", "self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e.value.code > 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", )", "e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self):", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) 
assert \"msg=failed\"", "origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func = self.module_cli.batch_update_bitable_record self.module_cli.batch_update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq(", "self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_record(self):", "def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq())", "self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq() ) assert \"msg=mock-failed\"", "**kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed", "self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func", "in e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\",", "test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert", "in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\"", "= origin_func def test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as", "test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "Code generated by lark_sdk_gen. DO NOT EDIT. 
import unittest import pylark import pytest", "origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=mock-failed\"", "test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq(", "test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self):", "self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "pylark.PyLarkError assert e.value.code > 0 def 
test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq(", "= mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", )", "test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as", "as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e:", "mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock get token", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record(self):", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func def test_mock_self_func_batch_create_bitable_record(self):", "e: self.module_cli.batch_create_bitable_record( pylark.BatchCreateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with 
pytest.raises(pylark.PyLarkError) as", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(", "self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError)", "self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) )", "def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) )", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(", "super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func =", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as", "def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record(self):", "pylark.UpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "e.type is pylark.PyLarkError 
assert e.value.code > 0 def test_real_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(", "assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\",", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e:", "0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert", "table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self):", "assert e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\",", "**kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\", code=1, msg=\"mock-raw-request-failed\" ) # mock get token class", "def test_mock_self_func_get_bitable_record_list(self): origin_func = self.module_cli.get_bitable_record_list self.module_cli.get_bitable_record_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq())", "self.module_cli.create_bitable_table(pylark.CreateBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) 
as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq()", "self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq()", "e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq())", "def test_mock_raw_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) )", "pytest from tests.test_conf import app_all_permission, app_no_permission from tests.test_helper import mock_get_tenant_access_token_failed def mock(*args, **kwargs):", "in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record =", "> 0 def test_real_request_batch_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq( app_token=\"x\", ) )", "self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_field_list", "> 0 def test_real_request_get_bitable_table_list(self): with 
pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) )", "assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\",", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError)", "def test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self):", "is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(", "self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with", "self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\"", "is pylark.PyLarkError assert e.value.code > 0 
assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_delete_bitable_table(self): with", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self):", "assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list", "as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "= origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as", "def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_create_bitable_record(self):", "**kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError( scope=\"scope\", func=\"func\",", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_table_list = origin_func def test_mock_self_func_create_bitable_table(self): origin_func = self.module_cli.create_bitable_table self.module_cli.create_bitable_table =", "self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with 
pytest.raises(pylark.PyLarkError)", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_record(self):", "e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self):", "in f\"{e}\" def test_mock_get_token_create_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_field = origin_func def test_mock_self_func_update_bitable_field(self):", "pylark.BatchCreateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq())", "= self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record(", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "e.value.msg def test_mock_raw_request_get_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", )", 
"self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record", "in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\"", "def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\", ) )", "0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", )", "test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_view_list( pylark.GetBitableViewListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type", "in f\"{e}\" self.module_cli.get_bitable_field_list = origin_func def test_mock_self_func_create_bitable_field(self): origin_func = self.module_cli.create_bitable_field self.module_cli.create_bitable_field = mock", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert", "self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_meta", "e.type is pylark.PyLarkError assert e.value.code > 0 def 
test_real_request_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e:", "e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", )", "= origin_func def test_mock_self_func_create_bitable_view(self): origin_func = self.module_cli.create_bitable_view self.module_cli.create_bitable_view = mock with pytest.raises(pylark.PyLarkError) as", "e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) )", "pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq(", "class TestBitableSampleRealRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs) self.cli = app_no_permission.ins() self.module_cli", "origin_func def test_mock_self_func_update_bitable_field(self): origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e:", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() )", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError) as", "self.module_cli.delete_bitable_table = origin_func def test_mock_self_func_batch_delete_bitable_table(self): origin_func = self.module_cli.batch_delete_bitable_table 
self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError)", "\"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_update_bitable_record = origin_func def test_mock_self_func_delete_bitable_record(self): origin_func = self.module_cli.delete_bitable_record self.module_cli.delete_bitable_record =", "self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record(", "f\"{e}\" self.module_cli.create_bitable_table = origin_func def test_mock_self_func_batch_create_bitable_table(self): origin_func = self.module_cli.batch_create_bitable_table self.module_cli.batch_create_bitable_table = mock with", "*args, **kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self):", "origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\"", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record =", "with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError)", "pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def test_mock_self_func_get_bitable_field_list(self): origin_func =", "test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert", "e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_create_bitable_record(self): with", "def mock(*args, **kwargs): raise pylark.PyLarkError(scope=\"scope\", func=\"func\", code=1, msg=\"mock-failed\") def mock_raw_request(*args, **kwargs): raise pylark.PyLarkError(", "as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "origin_func def test_mock_self_func_batch_create_bitable_record(self): origin_func = self.module_cli.batch_create_bitable_record self.module_cli.batch_create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", "def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", ) ) assert", "**kwargs): super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func", "= origin_func def 
test_mock_self_func_get_bitable_meta(self): origin_func = self.module_cli.get_bitable_meta self.module_cli.get_bitable_meta = mock with pytest.raises(pylark.PyLarkError) as", "in f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self,", "self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code", "as e: self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\"", "def test_real_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list( pylark.GetBitableFieldListReq( app_token=\"x\", table_id=\"x\", ) ) assert", "origin_func def test_mock_self_func_get_bitable_record(self): origin_func = self.module_cli.get_bitable_record self.module_cli.get_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e:", "**kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable def test_mock_self_func_get_bitable_view_list(self): origin_func = self.module_cli.get_bitable_view_list self.module_cli.get_bitable_view_list", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e:", "def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable", "assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def 
test_mock_raw_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as", "f\"{e}\" self.module_cli.get_bitable_meta = origin_func # mock raw request class TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args,", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_create_bitable_table( pylark.BatchCreateBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_create_bitable_table", "assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_field = origin_func def test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field", "self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def test_mock_raw_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError)", "f\"{e}\" def test_mock_get_token_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=failed\" in", "NOT EDIT. 
import unittest import pylark import pytest from tests.test_conf import app_all_permission, app_no_permission", "test_mock_get_token_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq()) assert \"msg=failed\" in f\"{e}\" # mock mock", "test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert", "> 0 def test_real_request_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field( pylark.UpdateBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\",", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record( pylark.DeleteBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type is", "**kwargs) self.cli = app_no_permission.ins() self.module_cli = self.cli.bitable def test_real_request_get_bitable_view_list(self): with pytest.raises(pylark.PyLarkError) as e:", "= self.module_cli.batch_delete_bitable_table self.module_cli.batch_delete_bitable_table = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert", "pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func =", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_record(", "test_mock_self_func_update_bitable_record(self): origin_func = self.module_cli.update_bitable_record self.module_cli.update_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert", "self.module_cli.delete_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_record", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record(pylark.GetBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record = origin_func def", "test_mock_self_func_delete_bitable_field(self): origin_func = self.module_cli.delete_bitable_field self.module_cli.delete_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert", "self.module_cli.create_bitable_view( pylark.CreateBitableViewReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.create_bitable_record = origin_func", "f\"{e}\" self.module_cli.batch_create_bitable_table = origin_func def test_mock_self_func_delete_bitable_table(self): origin_func = self.module_cli.delete_bitable_table self.module_cli.delete_bitable_table = mock with", "f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self): origin_func = self.module_cli.get_bitable_table_list self.module_cli.get_bitable_table_list = mock with", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq(", "as e: 
self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.get_bitable_record_list = origin_func def test_mock_self_func_get_bitable_record(self): origin_func", "def test_mock_get_token_batch_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=failed\" in f\"{e}\"", "as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_field = origin_func def test_mock_self_func_get_bitable_table_list(self):", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg", ") assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_delete_bitable_field(self): with pytest.raises(pylark.PyLarkError)", "pylark.PyLarkError assert e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_meta(self): with pytest.raises(pylark.PyLarkError)", "test_mock_get_token_delete_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_batch_delete_bitable_table(self): with", "def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_field(self):", "\"msg=failed\" in f\"{e}\" def test_mock_get_token_update_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: 
self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=failed\" in", "in e.value.msg def test_mock_raw_request_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view( pylark.DeleteBitableViewReq( app_token=\"x\", table_id=\"x\", view_id=\"x\",", "app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_create_bitable_table(self):", "e.value.code > 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_batch_delete_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e:", "e: self.module_cli.delete_bitable_field( pylark.DeleteBitableFieldReq( app_token=\"x\", table_id=\"x\", field_id=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "as e: self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.update_bitable_record = origin_func def test_mock_self_func_batch_update_bitable_record(self): origin_func", "as e: self.module_cli.create_bitable_view(pylark.CreateBitableViewReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_delete_bitable_view(self): with pytest.raises(pylark.PyLarkError) as e:", "e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e:", "with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record( pylark.GetBitableRecordReq( app_token=\"x\", table_id=\"x\", record_id=\"x\", ) ) assert e.type", "e: self.module_cli.get_bitable_table_list( pylark.GetBitableTableListReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert e.value.code >", "test_mock_get_token_batch_update_bitable_record(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.batch_update_bitable_record( pylark.BatchUpdateBitableRecordReq() ) assert \"msg=failed\" in f\"{e}\" def", "mock raw request class 
TestBitableSampleMockRawRequestFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_table( pylark.DeleteBitableTableReq( app_token=\"x\", table_id=\"x\", ) ) assert e.type is pylark.PyLarkError", "0 def test_real_request_create_bitable_table(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_table( pylark.CreateBitableTableReq( app_token=\"x\", ) ) assert", "self.module_cli.batch_delete_bitable_table( pylark.BatchDeleteBitableTableReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_table = origin_func def test_mock_self_func_get_bitable_meta(self): origin_func", "origin_func = self.module_cli.get_bitable_field_list self.module_cli.get_bitable_field_list = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq()) assert \"msg=mock-failed\"", "TestBitableSampleMockGetTokenFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.cli.auth.get_tenant_access_token =", "as e: self.module_cli.batch_delete_bitable_record( pylark.BatchDeleteBitableRecordReq() ) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.batch_delete_bitable_record = origin_func def", "origin_func = self.module_cli.update_bitable_field self.module_cli.update_bitable_field = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq()) assert \"msg=mock-failed\"", "pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_meta( pylark.GetBitableMetaReq( app_token=\"x\", ) ) assert e.type is pylark.PyLarkError assert", "lark_sdk_gen. DO NOT EDIT. 
import unittest import pylark import pytest from tests.test_conf import", "0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list( pylark.GetBitableRecordListReq(", ") ) assert e.type is pylark.PyLarkError assert e.value.code > 0 def test_real_request_get_bitable_meta(self): with", "e.value.code > 0 def test_real_request_create_bitable_field(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_field( pylark.CreateBitableFieldReq( app_token=\"x\", table_id=\"x\",", "> 0 assert \"mock-raw-request-failed\" in e.value.msg def test_mock_raw_request_get_bitable_field_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_field_list(", "super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs) self.cli = app_all_permission.ins() self.module_cli = self.cli.bitable self.cli.raw_request = mock_raw_request def", "self.module_cli.create_bitable_view = origin_func def test_mock_self_func_delete_bitable_view(self): origin_func = self.module_cli.delete_bitable_view self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError)", "f\"{e}\" self.module_cli.get_bitable_record = origin_func def test_mock_self_func_create_bitable_record(self): origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with", "test_mock_get_token_get_bitable_table_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq()) assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_create_bitable_table(self): with", "origin_func = self.module_cli.create_bitable_record self.module_cli.create_bitable_record = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq()) assert \"msg=mock-failed\"", 
"self.module_cli.delete_bitable_view = mock with pytest.raises(pylark.PyLarkError) as e: self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq()) assert \"msg=mock-failed\" in f\"{e}\" self.module_cli.delete_bitable_view", "f\"{e}\" # mock mock self func class TestBitableSampleMockSelfFuncFailed(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestBitableSampleMockSelfFuncFailed,", "assert \"msg=failed\" in f\"{e}\" def test_mock_get_token_get_bitable_record_list(self): with pytest.raises(pylark.PyLarkError) as e: self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq()) assert \"msg=failed\"" ]
[ "operation, and let angr proceed normally \"\"\" if any(is_tainted(a) for a in args):", "from taint import is_tainted, taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\"", "in args): #l.debug(\"Replacing operation {} on {} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state,", "arguments to irop, which will all be claripy objects (instances of claripy.ast.Base) return:", "(requires our fork of angr to actually respect the hook) \"\"\" def do_op(self,", "the operation, and let angr proceed normally \"\"\" if any(is_tainted(a) for a in", "the result of the operation; or None to refrain from hooking the operation,", "irop, which will all be claripy objects (instances of claripy.ast.Base) return: claripy object", "of angr to actually respect the hook) \"\"\" def do_op(self, state, irop, args):", "IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of operations performed in the symbolic execution.", "as the result of the operation; or None to refrain from hooking the", "all be claripy objects (instances of claripy.ast.Base) return: claripy object to use as", "result of the operation; or None to refrain from hooking the operation, and", "import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of", "angr to actually respect the hook) \"\"\" def do_op(self, state, irop, args): \"\"\"", "angr proceed normally \"\"\" if any(is_tainted(a) for a in args): #l.debug(\"Replacing operation {}", "args: arguments to irop, which will all be claripy objects (instances of claripy.ast.Base)", "the hook) \"\"\" def do_op(self, state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args:", "the computation of operations performed in the symbolic execution. 
(requires our fork of", "def do_op(self, state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments to irop,", "\"\"\" def do_op(self, state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments to", "args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments to irop, which will all be", "unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None @angr.SimStatePlugin.memo def copy(self, memo):", "= logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of operations performed in", "execution. (requires our fork of angr to actually respect the hook) \"\"\" def", "any(is_tainted(a) for a in args): #l.debug(\"Replacing operation {} on {} with unconstrained secret\".format(irop,", "import is_tainted, taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking", "is_tainted, taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the", "fork of angr to actually respect the hook) \"\"\" def do_op(self, state, irop,", "a in args): #l.debug(\"Replacing operation {} on {} with unconstrained secret\".format(irop, args)) return", "\"\"\" if any(is_tainted(a) for a in args): #l.debug(\"Replacing operation {} on {} with", "object to use as the result of the operation; or None to refrain", "hook) \"\"\" def do_op(self, state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments", "#l.debug(\"Replacing operation {} on {} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits)", "irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments to irop, which will all", "operation; or None to refrain from hooking the operation, and let angr proceed", "irop: an angr.vex.engines.SimIROp args: 
arguments to irop, which will all be claripy objects", "to refrain from hooking the operation, and let angr proceed normally \"\"\" if", "of operations performed in the symbolic execution. (requires our fork of angr to", "do_op(self, state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments to irop, which", "class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of operations performed in the symbolic", "secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None @angr.SimStatePlugin.memo def copy(self, memo): return", "(instances of claripy.ast.Base) return: claripy object to use as the result of the", "claripy objects (instances of claripy.ast.Base) return: claripy object to use as the result", "hooking the operation, and let angr proceed normally \"\"\" if any(is_tainted(a) for a", "args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None @angr.SimStatePlugin.memo def copy(self, memo): return IROpHook()", "and let angr proceed normally \"\"\" if any(is_tainted(a) for a in args): #l.debug(\"Replacing", "let angr proceed normally \"\"\" if any(is_tainted(a) for a in args): #l.debug(\"Replacing operation", "be claripy objects (instances of claripy.ast.Base) return: claripy object to use as the", "an angr.vex.engines.SimIROp args: arguments to irop, which will all be claripy objects (instances", "of claripy.ast.Base) return: claripy object to use as the result of the operation;", "{} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None @angr.SimStatePlugin.memo def", "for a in args): #l.debug(\"Replacing operation {} on {} with unconstrained secret\".format(irop, args))", "to actually respect the hook) \"\"\" def do_op(self, state, irop, args): \"\"\" irop:", "\"\"\" Allows hooking the computation of operations performed in the symbolic execution. 
(requires", "computation of operations performed in the symbolic execution. (requires our fork of angr", "from hooking the operation, and let angr proceed normally \"\"\" if any(is_tainted(a) for", "the operation; or None to refrain from hooking the operation, and let angr", "actually respect the hook) \"\"\" def do_op(self, state, irop, args): \"\"\" irop: an", "import angr from taint import is_tainted, taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class", "in the symbolic execution. (requires our fork of angr to actually respect the", "return: claripy object to use as the result of the operation; or None", "to irop, which will all be claripy objects (instances of claripy.ast.Base) return: claripy", "claripy object to use as the result of the operation; or None to", "args): #l.debug(\"Replacing operation {} on {} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\",", "refrain from hooking the operation, and let angr proceed normally \"\"\" if any(is_tainted(a)", "the symbolic execution. (requires our fork of angr to actually respect the hook)", "normally \"\"\" if any(is_tainted(a) for a in args): #l.debug(\"Replacing operation {} on {}", "claripy.ast.Base) return: claripy object to use as the result of the operation; or", "operations performed in the symbolic execution. 
(requires our fork of angr to actually", "{} on {} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None", "angr.vex.engines.SimIROp args: arguments to irop, which will all be claripy objects (instances of", "or None to refrain from hooking the operation, and let angr proceed normally", "with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None @angr.SimStatePlugin.memo def copy(self,", "our fork of angr to actually respect the hook) \"\"\" def do_op(self, state,", "proceed normally \"\"\" if any(is_tainted(a) for a in args): #l.debug(\"Replacing operation {} on", "will all be claripy objects (instances of claripy.ast.Base) return: claripy object to use", "on {} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return None @angr.SimStatePlugin.memo", "None to refrain from hooking the operation, and let angr proceed normally \"\"\"", "which will all be claripy objects (instances of claripy.ast.Base) return: claripy object to", "use as the result of the operation; or None to refrain from hooking", "angr from taint import is_tainted, taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin):", "operation {} on {} with unconstrained secret\".format(irop, args)) return taintedUnconstrainedBits(state, \"secret\", irop._output_size_bits) return", "objects (instances of claripy.ast.Base) return: claripy object to use as the result of", "taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation", "hooking the computation of operations performed in the symbolic execution. 
(requires our fork", "state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp args: arguments to irop, which will", "of the operation; or None to refrain from hooking the operation, and let", "Allows hooking the computation of operations performed in the symbolic execution. (requires our", "respect the hook) \"\"\" def do_op(self, state, irop, args): \"\"\" irop: an angr.vex.engines.SimIROp", "performed in the symbolic execution. (requires our fork of angr to actually respect", "taint import is_tainted, taintedUnconstrainedBits import logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows", "\"\"\" irop: an angr.vex.engines.SimIROp args: arguments to irop, which will all be claripy", "symbolic execution. (requires our fork of angr to actually respect the hook) \"\"\"", "to use as the result of the operation; or None to refrain from", "logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of operations performed in the", "logging l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of operations", "if any(is_tainted(a) for a in args): #l.debug(\"Replacing operation {} on {} with unconstrained", "l = logging.getLogger(__name__) class IROpHook(angr.SimStatePlugin): \"\"\" Allows hooking the computation of operations performed" ]
[ "QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people = get_people_select_related_only() return", "person/observatory/site via iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\", {", "inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet using select_related\"\"\"", "\"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of", "def list_people_select_related_only(request): people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\":", "= get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i>", "r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via", "request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), },", "\"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\", \"description\": \"get_people_values\", \"func_source\": inspect.getsource(get_people_values), }, )", "\"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet", "using \"naive\" iteration of Person 
QuerySet\"\"\" people = get_people_naive() return render( request, \"people/list_people_fastest.html\",", "QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration", "get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List", "= get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\":", "QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site using explict", "= get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\":", "\"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, )", "list_people_select_related_only(request): people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using", "select_related\"\"\" people = get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using", "\"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request):", "r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": 
inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site via", "person/observatory/site using explict retrieval of relevant values\"\"\" people = get_people_values() return render( request,", "def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\" people = get_people_naive()", "def index(request): return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of", "people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people", "import inspect from django.shortcuts import render from people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related,", "person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\" people = get_people_naive() return render( request,", "def list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet using select_related\"\"\" people = get_people_select_related()", "\"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, )", "render from people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request):", "r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List", "( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): 
return render(request, \"people/index.html\") def", "import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): return render(request, \"people/index.html\")", "def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return render(", "get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site", "render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive),", ") def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return", "request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\", \"description\": \"get_people_values\", \"func_source\": inspect.getsource(get_people_values), },", "return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\":", "retrieval of relevant values\"\"\" people = get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\":", "people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): return render(request,", "r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people = get_people_select_related_only()", "\"title\": r\"Using QuerySet.only()\", \"description\": 
\"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site", "via iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\":", "people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\",", "inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of relevant values\"\"\"", "people = get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\",", "return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\":", "via iteration of QuerySet using select_related\"\"\" people = get_people_select_related() return render( request, \"people/list_people_fastest.html\",", "people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List", "\"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\",", "render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related),", "\"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": 
inspect.getsource(get_people_select_related_only), }, )", "\"\"\"List person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\" people = get_people_naive() return render(", "people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()", "Person QuerySet\"\"\" people = get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\":", "{ \"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def", "QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site via iteration", "\"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people = get_people_select_related_only() return render(", "render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\", \"description\": \"get_people_values\", \"func_source\": inspect.getsource(get_people_values),", "request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), },", "list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet using select_related\"\"\" people = get_people_select_related() return", "\"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet using", "people, \"title\": r\"Using QuerySet.only()\", \"description\": 
\"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List", "}, ) def list_people_select_related_only(request): people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\":", "of relevant values\"\"\" people = get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\": people,", ") def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of relevant values\"\"\" people =", "inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people =", "get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\",", "of Person QuerySet\"\"\" people = get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\": people,", "}, ) def list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet using select_related\"\"\" people", "iteration of Person QuerySet\"\"\" people = get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\":", "<i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site using", "<gh_stars>0 import inspect from django.shortcuts import render from people.get_people import ( get_people_naive, get_people_select_related_only,", "\"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people = get_people_select_related_only() return render( request,", "{ \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> 
QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), },", "QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site", "get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\",", "get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): return render(request, \"people/index.html\") def list_people_naive(request):", "explict retrieval of relevant values\"\"\" people = get_people_values() return render( request, \"people/list_people_fastest.html\", {", "QuerySet using select_related\"\"\" people = get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\": people,", "\"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\" people =", "import render from people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def", ") def list_people_select_related_only(request): people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people,", "\"\"\"List person/observatory/site via iteration of QuerySet using select_related\"\"\" people = get_people_select_related() return render(", "return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\":", "return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive 
QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\":", "inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", {", ") def index(request): return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration", "using select_related\"\"\" people = get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\":", "\"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only),", "}, ) def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of relevant values\"\"\" people", "\"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval", "get_people_values, ) def index(request): return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\"", "\"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of relevant", "\"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of", "get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\",", "\"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": 
\"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def list_people_values(request):", "get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\",", "people = get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.only()\",", "QuerySet\"\"\" people = get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive", "{ \"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def", "get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\", \"description\": \"get_people_values\",", "= get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\", \"description\":", "\"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people =", "of QuerySet using select_related\"\"\" people = get_people_select_related() return render( request, \"people/list_people_fastest.html\", { \"people\":", "relevant values\"\"\" people = get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\":", "\"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people", "using explict retrieval of relevant values\"\"\" people = get_people_values() return render( request, 
\"people/list_people_fastest.html\",", "QuerySet.only()\"\"\" people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using", "\"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request):", "render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\",", "get_people_qs_only, get_people_values, ) def index(request): return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using", "render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only),", "\"func_source\": inspect.getsource(get_people_naive), }, ) def list_people_select_related_only(request): people = get_people_select_related_only() return render( request, \"people/list_people_fastest.html\",", "request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\":", "iteration of QuerySet using select_related\"\"\" people = get_people_select_related() return render( request, \"people/list_people_fastest.html\", {", "from django.shortcuts import render from people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values,", "\"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site via 
iteration of", "people = get_people_naive() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive QuerySet.all()\",", "iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people,", "\"\"\"List person/observatory/site using explict retrieval of relevant values\"\"\" people = get_people_values() return render(", "people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, ) def", "{ \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def", "django.shortcuts import render from people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, )", "request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Naive QuerySet.all()\", \"description\": \"get_people_naive\", \"func_source\": inspect.getsource(get_people_naive), },", "of QuerySet.only()\"\"\" people = get_people_qs_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\":", "values\"\"\" people = get_people_values() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using", "return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.values()\", \"description\": \"get_people_values\", \"func_source\":", "person/observatory/site via iteration of QuerySet using select_related\"\"\" people = get_people_select_related() return render( request,", "inspect from django.shortcuts import render from people.get_people import ( get_people_naive, get_people_select_related_only, 
get_people_select_related, get_people_qs_only,", "\"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\"", "def list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of relevant values\"\"\" people = get_people_values()", "\"naive\" iteration of Person QuerySet\"\"\" people = get_people_naive() return render( request, \"people/list_people_fastest.html\", {", ") def list_people_select_related(request): \"\"\"List person/observatory/site via iteration of QuerySet using select_related\"\"\" people =", "list_people_values(request): \"\"\"List person/observatory/site using explict retrieval of relevant values\"\"\" people = get_people_values() return", "\"title\": r\"Using QuerySet.select_related()\", \"description\": \"get_people_select_related_only\", \"func_source\": inspect.getsource(get_people_select_related_only), }, ) def list_people_select_related(request): \"\"\"List person/observatory/site", "}, ) def list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people = get_people_qs_only()", "render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\" people", "return render(request, \"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\"", "\"people\": people, \"title\": r\"Using QuerySet.select_related() <i>and</i> QuerySet.only()\", \"description\": \"get_people_qs_only\", \"func_source\": inspect.getsource(get_people_qs_only), }, )", "list_people_qs_only(request): \"\"\"List person/observatory/site via iteration of QuerySet.only()\"\"\" people = get_people_qs_only() return render( request,", "index(request): return render(request, 
\"people/index.html\") def list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of Person", "= get_people_select_related_only() return render( request, \"people/list_people_fastest.html\", { \"people\": people, \"title\": r\"Using QuerySet.select_related()\", \"description\":", "from people.get_people import ( get_people_naive, get_people_select_related_only, get_people_select_related, get_people_qs_only, get_people_values, ) def index(request): return", "\"people\": people, \"title\": r\"Using QuerySet.only()\", \"description\": \"get_people_select_related\", \"func_source\": inspect.getsource(get_people_select_related), }, ) def list_people_qs_only(request):", "list_people_naive(request): \"\"\"List person/observatory/site using \"naive\" iteration of Person QuerySet\"\"\" people = get_people_naive() return" ]
[ "def _containsDuplicate(self, nums: List[int]) -> bool: hash_set = set() for num in nums:", "coding:utf-8 -*- \"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date", "\"\"\" class Solution: def containsDuplicate(self, nums: List[int]) -> bool: # exception case if", "Example 1: Input: nums = [1,2,3,1] Output: true Example 2: Input: nums =", "init instance solution = Solution() # run & time start = time.process_time() ans", "return true if any value appears at least twice in the array, and", ": Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import", "true # nums = [1, 2, 3, 1] # Example 2: Output: false", "= [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length <= 10^5 -10^9 <= nums[i]", "1: return False # main method: (scan once, store numbers in hash set,", "# -*- coding:utf-8 -*- \"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author :", "appears at least twice in the array, and return false if every element", "type if len(nums) == 1: return False # main method: (scan once, store", "2: Output: false # nums = [1, 2, 3, 4] # Example 3:", "end = time.process_time() # show answer print('\\nAnswer:') print(ans) # show time consumption print('Running", "= time.process_time() # show answer print('\\nAnswer:') print(ans) # show time consumption print('Running Time:", "answer print('\\nAnswer:') print(ans) # show time consumption print('Running Time: %.5f ms' % ((end", "time from typing import List # import functools \"\"\" LeetCode - 0217 -", "store numbers in hash set, if duplicate, stop 
and return True) return self._containsDuplicate(nums)", "main(): # Example 1: Output: true # nums = [1, 2, 3, 1]", "time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time() # show answer print('\\nAnswer:') print(ans) #", "time consumption print('Running Time: %.5f ms' % ((end - start) * 1000)) if", "-> bool: # exception case if not isinstance(nums, list) or len(nums) <= 0:", "the array, and return false if every element is distinct. Example 1: Input:", "integer array nums, return true if any value appears at least twice in", "bool: # exception case if not isinstance(nums, list) or len(nums) <= 0: return", "- 0217 - (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an", "if not isinstance(nums, list) or len(nums) <= 0: return False # Error input", "4, 2] # init instance solution = Solution() # run & time start", "return False # Error input type if len(nums) == 1: return False #", "# show time consumption print('Running Time: %.5f ms' % ((end - start) *", "least twice in the array, and return false if every element is distinct.", "method: (scan once, store numbers in hash set, if duplicate, stop and return", "if len(nums) == 1: return False # main method: (scan once, store numbers", "time start = time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time() # show answer", "0: return False # Error input type if len(nums) == 1: return False", "if duplicate, stop and return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) ->", "python # -*- coding:utf-8 -*- \"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author", "- (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer array", "# init instance solution = Solution() # run & time 
start = time.process_time()", "nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length <= 10^5 -10^9 <=", "Output: true nums = [1, 1, 1, 3, 3, 4, 3, 2, 4,", "@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\"", "in nums: if num in hash_set: return True hash_set.add(num) return False def main():", "hash_set = set() for num in nums: if num in hash_set: return True", "[1,2,3,1] Output: true Example 2: Input: nums = [1,2,3,4] Output: false Example 3:", "1] # Example 2: Output: false # nums = [1, 2, 3, 4]", "# Example 1: Output: true # nums = [1, 2, 3, 1] #", "4, 3, 2, 4, 2] # init instance solution = Solution() # run", "Input: nums = [1,2,3,1] Output: true Example 2: Input: nums = [1,2,3,4] Output:", "solution.containsDuplicate(nums) end = time.process_time() # show answer print('\\nAnswer:') print(ans) # show time consumption", "1: Input: nums = [1,2,3,1] Output: true Example 2: Input: nums = [1,2,3,4]", "return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool: hash_set = set() for num", "[1,2,3,4] Output: false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1", "= [1,2,3,1] Output: true Example 2: Input: nums = [1,2,3,4] Output: false Example", "0217 - (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer", "[1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length <= 10^5 -10^9 <= nums[i] <=", "[1, 2, 3, 4] # Example 3: Output: true nums = [1, 1,", "[1, 2, 3, 1] # Example 2: Output: false # nums = [1,", "Solution() # run & time start = time.process_time() ans = solution.containsDuplicate(nums) end =", "nums = [1,2,3,1] Output: true Example 2: Input: nums = [1,2,3,4] Output: false", "\"\"\" LeetCode - 0217 - (Easy) - Contains 
Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement:", "for num in nums: if num in hash_set: return True hash_set.add(num) return False", "return False def main(): # Example 1: Output: true # nums = [1,", "2, 3, 1] # Example 2: Output: false # nums = [1, 2,", "Example 2: Output: false # nums = [1, 2, 3, 4] # Example", "Output: true # nums = [1, 2, 3, 1] # Example 2: Output:", "not isinstance(nums, list) or len(nums) <= 0: return False # Error input type", "in hash_set: return True hash_set.add(num) return False def main(): # Example 1: Output:", "3, 4] # Example 3: Output: true nums = [1, 1, 1, 3,", "self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool: hash_set = set() for num in", "hash set, if duplicate, stop and return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums:", "in the array, and return false if every element is distinct. Example 1:", "hash_set: return True hash_set.add(num) return False def main(): # Example 1: Output: true", "= [1, 2, 3, 1] # Example 2: Output: false # nums =", "LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys import time from", "consumption print('Running Time: %.5f ms' % ((end - start) * 1000)) if __name__", "3, 1] # Example 2: Output: false # nums = [1, 2, 3,", "ans = solution.containsDuplicate(nums) end = time.process_time() # show answer print('\\nAnswer:') print(ans) # show", "stop and return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool: hash_set", "2, 4, 2] # init instance solution = Solution() # run & time", "== 1: return False # main method: (scan once, store numbers in hash", "False def main(): # Example 1: Output: true # nums = [1, 2,", "Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer 
array nums, return true", "1 <= nums.length <= 10^5 -10^9 <= nums[i] <= 10^9 \"\"\" class Solution:", "print('\\nAnswer:') print(ans) # show time consumption print('Running Time: %.5f ms' % ((end -", "if every element is distinct. Example 1: Input: nums = [1,2,3,1] Output: true", "or len(nums) <= 0: return False # Error input type if len(nums) ==", "Time: %.5f ms' % ((end - start) * 1000)) if __name__ == \"__main__\":", "<= 10^5 -10^9 <= nums[i] <= 10^9 \"\"\" class Solution: def containsDuplicate(self, nums:", "nums = [1, 2, 3, 4] # Example 3: Output: true nums =", "= [1, 1, 1, 3, 3, 4, 3, 2, 4, 2] # init", "numbers in hash set, if duplicate, stop and return True) return self._containsDuplicate(nums) def", "any value appears at least twice in the array, and return false if", "Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length <=", "2] # init instance solution = Solution() # run & time start =", "= [1, 2, 3, 4] # Example 3: Output: true nums = [1,", "every element is distinct. 
Example 1: Input: nums = [1,2,3,1] Output: true Example", "==================================================================\"\"\" import sys import time from typing import List # import functools \"\"\"", "%.5f ms' % ((end - start) * 1000)) if __name__ == \"__main__\": sys.exit(main())", "import List # import functools \"\"\" LeetCode - 0217 - (Easy) - Contains", "@Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys import time from typing", "return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool: hash_set = set()", "list) or len(nums) <= 0: return False # Error input type if len(nums)", "containsDuplicate(self, nums: List[int]) -> bool: # exception case if not isinstance(nums, list) or", "true if any value appears at least twice in the array, and return", "nums[i] <= 10^9 \"\"\" class Solution: def containsDuplicate(self, nums: List[int]) -> bool: #", "3, 3, 4, 3, 2, 4, 2] # init instance solution = Solution()", "# exception case if not isinstance(nums, list) or len(nums) <= 0: return False", "start = time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time() # show answer print('\\nAnswer:')", "Output: false # nums = [1, 2, 3, 4] # Example 3: Output:", "@File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys import", "return False # main method: (scan once, store numbers in hash set, if", "nums: List[int]) -> bool: hash_set = set() for num in nums: if num", "List[int]) -> bool: # exception case if not isinstance(nums, list) or len(nums) <=", "<filename>LeetCode-All-Solution/Python3/LC-0217-Contains-Duplicate.py<gh_stars>0 #!/usr/bin/env python # -*- coding:utf-8 -*- \"\"\"================================================================= @Project : 
Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File :", "false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length", "#!/usr/bin/env python # -*- coding:utf-8 -*- \"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py", "4] # Example 3: Output: true nums = [1, 1, 1, 3, 3,", "at least twice in the array, and return false if every element is", "@Date : 2022-02-25 ==================================================================\"\"\" import sys import time from typing import List #", "Description & Requirement: Given an integer array nums, return true if any value", "<= 0: return False # Error input type if len(nums) == 1: return", ": 2022-02-25 ==================================================================\"\"\" import sys import time from typing import List # import", "nums.length <= 10^5 -10^9 <= nums[i] <= 10^9 \"\"\" class Solution: def containsDuplicate(self,", "def containsDuplicate(self, nums: List[int]) -> bool: # exception case if not isinstance(nums, list)", "true Example 2: Input: nums = [1,2,3,4] Output: false Example 3: Input: nums", "isinstance(nums, list) or len(nums) <= 0: return False # Error input type if", "run & time start = time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time() #", "true nums = [1, 1, 1, 3, 3, 4, 3, 2, 4, 2]", "duplicate, stop and return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool:", "List[int]) -> bool: hash_set = set() for num in nums: if num in", "3, 2, 4, 2] # init instance solution = Solution() # run &", "solution = Solution() # run & time start = time.process_time() ans = solution.containsDuplicate(nums)", "import time from typing import List # import functools \"\"\" LeetCode - 0217", "print('Running Time: %.5f ms' % ((end - start) * 1000)) if __name__ ==", "# main method: (scan 
once, store numbers in hash set, if duplicate, stop", "<= nums[i] <= 10^9 \"\"\" class Solution: def containsDuplicate(self, nums: List[int]) -> bool:", "10^9 \"\"\" class Solution: def containsDuplicate(self, nums: List[int]) -> bool: # exception case", "once, store numbers in hash set, if duplicate, stop and return True) return", "_containsDuplicate(self, nums: List[int]) -> bool: hash_set = set() for num in nums: if", "Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer array nums, return true if", "value appears at least twice in the array, and return false if every", "Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length <= 10^5 -10^9", "if num in hash_set: return True hash_set.add(num) return False def main(): # Example", "Solution: def containsDuplicate(self, nums: List[int]) -> bool: # exception case if not isinstance(nums,", ": [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys import time from typing import", "# import functools \"\"\" LeetCode - 0217 - (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/", "element is distinct. 
Example 1: Input: nums = [1,2,3,1] Output: true Example 2:", "Requirement: Given an integer array nums, return true if any value appears at", "# Example 3: Output: true nums = [1, 1, 1, 3, 3, 4,", "[YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys import time from typing import List", "true Constraints: 1 <= nums.length <= 10^5 -10^9 <= nums[i] <= 10^9 \"\"\"", "Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys", ": LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25 ==================================================================\"\"\" import sys import time", "bool: hash_set = set() for num in nums: if num in hash_set: return", "input type if len(nums) == 1: return False # main method: (scan once,", "len(nums) <= 0: return False # Error input type if len(nums) == 1:", "show time consumption print('Running Time: %.5f ms' % ((end - start) * 1000))", "3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <= nums.length <= 10^5", "instance solution = Solution() # run & time start = time.process_time() ans =", "print(ans) # show time consumption print('Running Time: %.5f ms' % ((end - start)", "and return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool: hash_set =", "= Solution() # run & time start = time.process_time() ans = solution.containsDuplicate(nums) end", "& time start = time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time() # show", "nums: if num in hash_set: return True hash_set.add(num) return False def main(): #", "sys import time from typing import List # import functools \"\"\" LeetCode -", "false if every element is distinct. 
Example 1: Input: nums = [1,2,3,1] Output:", "False # Error input type if len(nums) == 1: return False # main", "nums = [1, 1, 1, 3, 3, 4, 3, 2, 4, 2] #", "-> bool: hash_set = set() for num in nums: if num in hash_set:", "Example 1: Output: true # nums = [1, 2, 3, 1] # Example", "if any value appears at least twice in the array, and return false", "show answer print('\\nAnswer:') print(ans) # show time consumption print('Running Time: %.5f ms' %", "in hash set, if duplicate, stop and return True) return self._containsDuplicate(nums) def _containsDuplicate(self,", "typing import List # import functools \"\"\" LeetCode - 0217 - (Easy) -", "num in nums: if num in hash_set: return True hash_set.add(num) return False def", "nums: List[int]) -> bool: # exception case if not isinstance(nums, list) or len(nums)", "# Example 2: Output: false # nums = [1, 2, 3, 4] #", "3, 4, 3, 2, 4, 2] # init instance solution = Solution() #", "set, if duplicate, stop and return True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int])", "List # import functools \"\"\" LeetCode - 0217 - (Easy) - Contains Duplicate", "hash_set.add(num) return False def main(): # Example 1: Output: true # nums =", "return false if every element is distinct. Example 1: Input: nums = [1,2,3,1]", "from typing import List # import functools \"\"\" LeetCode - 0217 - (Easy)", "nums, return true if any value appears at least twice in the array,", "functools \"\"\" LeetCode - 0217 - (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description &", "2: Input: nums = [1,2,3,4] Output: false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2]", "nums = [1,2,3,4] Output: false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true", "= [1,2,3,4] Output: false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints:", "array, and return false if every element is distinct. 
Example 1: Input: nums", "Error input type if len(nums) == 1: return False # main method: (scan", "false # nums = [1, 2, 3, 4] # Example 3: Output: true", "an integer array nums, return true if any value appears at least twice", "True hash_set.add(num) return False def main(): # Example 1: Output: true # nums", "exception case if not isinstance(nums, list) or len(nums) <= 0: return False #", "- Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer array nums, return", "& Requirement: Given an integer array nums, return true if any value appears", "array nums, return true if any value appears at least twice in the", "distinct. Example 1: Input: nums = [1,2,3,1] Output: true Example 2: Input: nums", "len(nums) == 1: return False # main method: (scan once, store numbers in", "LeetCode - 0217 - (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given", "twice in the array, and return false if every element is distinct. 
Example", "<= nums.length <= 10^5 -10^9 <= nums[i] <= 10^9 \"\"\" class Solution: def", "case if not isinstance(nums, list) or len(nums) <= 0: return False # Error", "time.process_time() # show answer print('\\nAnswer:') print(ans) # show time consumption print('Running Time: %.5f", "import functools \"\"\" LeetCode - 0217 - (Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description", "\"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date : 2022-02-25", "3: Output: true nums = [1, 1, 1, 3, 3, 4, 3, 2,", "[1, 1, 1, 3, 3, 4, 3, 2, 4, 2] # init instance", "False # main method: (scan once, store numbers in hash set, if duplicate,", "Given an integer array nums, return true if any value appears at least", "Example 2: Input: nums = [1,2,3,4] Output: false Example 3: Input: nums =", "Input: nums = [1,2,3,4] Output: false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output:", "return True hash_set.add(num) return False def main(): # Example 1: Output: true #", "# nums = [1, 2, 3, 1] # Example 2: Output: false #", "# run & time start = time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time()", "2022-02-25 ==================================================================\"\"\" import sys import time from typing import List # import functools", "Output: false Example 3: Input: nums = [1,1,1,3,3,4,3,2,4,2] Output: true Constraints: 1 <=", "2, 3, 4] # Example 3: Output: true nums = [1, 1, 1,", "= time.process_time() ans = solution.containsDuplicate(nums) end = time.process_time() # show answer print('\\nAnswer:') print(ans)", "and return false if every element is distinct. 
Example 1: Input: nums =", "= solution.containsDuplicate(nums) end = time.process_time() # show answer print('\\nAnswer:') print(ans) # show time", "1, 1, 3, 3, 4, 3, 2, 4, 2] # init instance solution", "Constraints: 1 <= nums.length <= 10^5 -10^9 <= nums[i] <= 10^9 \"\"\" class", "(scan once, store numbers in hash set, if duplicate, stop and return True)", "-*- \"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin) @Date :", "# show answer print('\\nAnswer:') print(ans) # show time consumption print('Running Time: %.5f ms'", "True) return self._containsDuplicate(nums) def _containsDuplicate(self, nums: List[int]) -> bool: hash_set = set() for", "Example 3: Output: true nums = [1, 1, 1, 3, 3, 4, 3,", "10^5 -10^9 <= nums[i] <= 10^9 \"\"\" class Solution: def containsDuplicate(self, nums: List[int])", "num in hash_set: return True hash_set.add(num) return False def main(): # Example 1:", "1, 3, 3, 4, 3, 2, 4, 2] # init instance solution =", "https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer array nums, return true if any", "class Solution: def containsDuplicate(self, nums: List[int]) -> bool: # exception case if not", "Output: true Example 2: Input: nums = [1,2,3,4] Output: false Example 3: Input:", "# nums = [1, 2, 3, 4] # Example 3: Output: true nums", "<= 10^9 \"\"\" class Solution: def containsDuplicate(self, nums: List[int]) -> bool: # exception", "= set() for num in nums: if num in hash_set: return True hash_set.add(num)", "1: Output: true # nums = [1, 2, 3, 1] # Example 2:", "set() for num in nums: if num in hash_set: return True hash_set.add(num) return", "def main(): # Example 1: Output: true # nums = [1, 2, 3,", "-10^9 <= nums[i] <= 10^9 \"\"\" class Solution: def containsDuplicate(self, nums: List[int]) ->", "nums = [1, 2, 3, 1] # Example 2: Output: 
false # nums", "Output: true Constraints: 1 <= nums.length <= 10^5 -10^9 <= nums[i] <= 10^9", "# Error input type if len(nums) == 1: return False # main method:", "(Easy) - Contains Duplicate https://leetcode.com/problems/contains-duplicate/ Description & Requirement: Given an integer array nums,", "main method: (scan once, store numbers in hash set, if duplicate, stop and", "-*- coding:utf-8 -*- \"\"\"================================================================= @Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3 @File : LC-0217-Contains-Duplicate.py @Author : [YuweiYin](https://github.com/YuweiYin)", "import sys import time from typing import List # import functools \"\"\" LeetCode", "is distinct. Example 1: Input: nums = [1,2,3,1] Output: true Example 2: Input:" ]
[ "title movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element", "element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies", "all_movies: return True return False def get_recommendations(title, n): names = [] movie_user_likes =", "df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in features:", "for feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer()", "cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0]", "row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row ) def check_movie(title): if", "get_recommendations(title, n): names = [] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies =", "get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0]))", "names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for", "return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row ) def check_movie(title):", 
"CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title):", "= [] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies =", "for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director']", "if title in all_movies: return True return False def get_recommendations(title, n): names =", "return False def get_recommendations(title, n): names = [] movie_user_likes = title movie_index =", "get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row):", "in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer() count_matrix =", "get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\"", "= df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer() count_matrix = cv.fit_transform(df[\"combined_features\"]) cosine_sim =", "movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in", "= df.original_title.str.lower().tolist() for feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv", "\"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row ) def 
check_movie(title): if title in", "[] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda", "list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df", "= pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in features: df[feature]", "= ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"]", "== index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords']", "= title movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for", "similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return", "def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\"", "except: print (\"Error:\", row ) def check_movie(title): if title in all_movies: return True", "n): names = [] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index]))", "+\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row 
) def check_movie(title): if title", "row ) def check_movie(title): if title in all_movies: return True return False def", "import pandas as pd import numpy as np from sklearn.feature_extraction.text import CountVectorizer from", "in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies =", "sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower()", "title in all_movies: return True return False def get_recommendations(title, n): names = []", "def get_recommendations(title, n): names = [] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies", "import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def", "numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index):", "pandas as pd import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise", "return True return False def get_recommendations(title, n): names = [] movie_user_likes = title", "names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in", "\"+row[\"director\"] except: print (\"Error:\", row ) def check_movie(title): if title in all_movies: return", "return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"]", "x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: 
names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features =", "return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try:", "title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\",", "False def get_recommendations(title, n): names = [] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes)", "df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer() count_matrix = cv.fit_transform(df[\"combined_features\"]) cosine_sim = cosine_similarity(count_matrix)", "np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index", "combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row )", "return names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature", "try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row ) def", "def check_movie(title): if title in all_movies: return True return False def get_recommendations(title, n):", "as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return", "\"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row ) def check_movie(title): if title in all_movies:", "sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in 
sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df =", "['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] =", "pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in features: df[feature] =", "= sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\")", "df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except:", "in all_movies: return True return False def get_recommendations(title, n): names = [] movie_user_likes", "= list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names", "feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer() count_matrix", "df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer() count_matrix = cv.fit_transform(df[\"combined_features\"]) cosine_sim", ") def check_movie(title): if title in all_movies: return True return False def get_recommendations(title,", "all_movies = df.original_title.str.lower().tolist() for feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1)", "== title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" 
\"+row[\"director\"] except: print", "def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def", "pd import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity", "import cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() ==", "True return False def get_recommendations(title, n): names = [] movie_user_likes = title movie_index", "check_movie(title): if title in all_movies: return True return False def get_recommendations(title, n): names", "import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def", "features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv = CountVectorizer() count_matrix = cv.fit_transform(df[\"combined_features\"])", "def combine_features(row): try: return row['keywords'] +\" \"+row['cast']+\" \"+row[\"genres\"]+\" \"+row[\"director\"] except: print (\"Error:\", row", "sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features", "= get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True) for element in sorted_similar_movies[1:n+1]:", "features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist() for feature in features: df[feature] = df[feature].fillna('')", "names = [] movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies = 
list(enumerate(cosine_sim[movie_index])) sorted_similar_movies", "df.original_title.str.lower().tolist() for feature in features: df[feature] = df[feature].fillna('') df[\"combined_features\"] = df.apply(combine_features,axis=1) cv =", "as pd import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import", "from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index ==", "sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0]", "df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return", "print (\"Error:\", row ) def check_movie(title): if title in all_movies: return True return", "(\"Error:\", row ) def check_movie(title): if title in all_movies: return True return False", "sorted_similar_movies[1:n+1]: names.append(get_title_from_index(element[0])) return names df = pd.read_csv(\"./data/movie_dataset.csv\") features = ['keywords','cast','genres','director'] all_movies = df.original_title.str.lower().tolist()", "from sklearn.metrics.pairwise import cosine_similarity def get_title_from_index(index): return df[df.index == index][\"title\"].values[0] def get_index_from_title(title): return", "movie_user_likes = title movie_index = get_index_from_title(movie_user_likes) similar_movies = list(enumerate(cosine_sim[movie_index])) sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True)", "index][\"title\"].values[0] def get_index_from_title(title): return df[df.title.str.lower() == title.lower()][\"index\"].values[0] def combine_features(row): try: return row['keywords'] +\"" ]
[]
[ "django_filters from django_filters import CharFilter from .models import * class FoodFilter(django_filters.FilterSet): food_name =", "import django_filters from django_filters import CharFilter from .models import * class FoodFilter(django_filters.FilterSet): food_name", "import CharFilter from .models import * class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name'", "class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food items')", "= 'name' , lookup_expr = 'icontains',label='search food items') class Meta: model = Food", "CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food items') class Meta: model =", "lookup_expr = 'icontains',label='search food items') class Meta: model = Food fields = ['food_name']", "import * class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search", "FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food items') class", ".models import * class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' , lookup_expr =", "* class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food", ", lookup_expr = 'icontains',label='search food items') class Meta: model = Food fields =", "CharFilter from .models import * class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' ,", "from django_filters import CharFilter from .models import * class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name", "'name' , lookup_expr = 'icontains',label='search food items') class Meta: model = Food fields", "food_name = CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food items') class Meta:", "from .models import * class 
FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name = 'name' , lookup_expr", "= CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food items') class Meta: model", "django_filters import CharFilter from .models import * class FoodFilter(django_filters.FilterSet): food_name = CharFilter(field_name =" ]
[ "<filename>setup.py from setuptools import setup, find_packages install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib',", "from setuptools import setup, find_packages install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn',", "'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup(", "'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python toolkit for", "version='1.1', description='A Python toolkit for personalized gait calibration', author='<NAME>', author_email='<EMAIL>', packages=find_packages(), install_requires=install_requires, )", "'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python toolkit for personalized", "import setup, find_packages install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script',", "name='gait-calibrate', version='1.1', description='A Python toolkit for personalized gait calibration', author='<NAME>', author_email='<EMAIL>', packages=find_packages(), install_requires=install_requires,", "'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python toolkit", "'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1',", "'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate',", "= [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2',", "] setup( name='gait-calibrate', version='1.1', description='A Python toolkit for personalized gait calibration', author='<NAME>', 
author_email='<EMAIL>',", "setup, find_packages install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap',", "'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python toolkit for personalized gait", "'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python toolkit for personalized gait calibration',", "'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python", "setuptools import setup, find_packages install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask',", "setup( name='gait-calibrate', version='1.1', description='A Python toolkit for personalized gait calibration', author='<NAME>', author_email='<EMAIL>', packages=find_packages(),", "find_packages install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug',", "'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A", "'Jinja2', ] setup( name='gait-calibrate', version='1.1', description='A Python toolkit for personalized gait calibration', author='<NAME>',", "install_requires = [ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh',", "[ 'numpy', 'pandas', 'scipy', 'matplotlib', 'scikit-learn', 'flask', 'flask-script', 'flask-bootstrap', 'werkzeug', 'bokeh', 'Jinja2', ]" ]
[ "rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List all snippets, or create a new", "group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message':", "is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not", "def post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token)", "PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed", "+ timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now())", "if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier)", "player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name})", "from rest_framework.response import Response from rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels import", "= request.user.id group_id = request.user.group.id return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name':", "request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord player.save()", "group_name = request.user.group.name 
player_name = request.user.name is_leader = request.user.leader player_id = request.user.id group_id", "'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List", "request): lobby_id = request.data['lobby_id'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby", "Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return", "'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List", "= gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score", "= request.user.group.name player_name = request.user.name is_leader = request.user.leader player_id = request.user.id group_id =", "queryset = Player.objects.all() serializer_class = PlayerSerializer def post(self, request): group_name = request.data['group_name'] player", "ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if", "request.user.id group_id = request.user.group.id return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name,", "not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier)", "PUSHER_CLIENT import json from django.utils import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models", "all snippets, or create a new snippet. 
\"\"\" queryset = Lobby.objects.all() serializer_class =", "'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return", "player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed':", "= True player.save() return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id},", "if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group =", "= y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for player in", "Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\" List all", "format=None): identifier = request.GET[\"id\"] if not identifier: return Response({'id': 'This field is required!'},", "import Response from rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from", "= PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count()", "= PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return", "return Response({'status': True}) class AvatarView(APIView): \"\"\" List all snippets, or create a new", "= Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) 
lobby_player.joined_since = timezone.now() +", "player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array})", "class GroupView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\" queryset", "queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id'] lobby", "= request.data['minigame_name'] group = request.user.group player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20)", "if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score')", "request, format=None): identifier = request.data['group_id'] if not identifier: return Response({'id': 'This field is", "PlayerSerializer def post(self, request): group_name = request.data['group_name'] player = request.user group = Group.objects.create(name=group_name)", "geocache_group_count = 0 # Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group =", "players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id':", "player.save() return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id'] player = request.user", "1 total_score = total_score + geocache_max if amg.first(): alias_max = amg.first().current_score return Response({'Push", "= Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token})", "geocache_max = gcmg.first().current_score # alias scores amg = 
AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first():", "AvatarView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\" queryset =", "= request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def post(self,", "Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets, or create a new snippet.", "new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def post(self, request): group_name", "Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id))", "if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed + 1", "response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return", "player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created", "lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self,", "= Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save()", "group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets, or create a", "json from django.utils import timezone from rest.push_the_buttons.models 
import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame", "serializer_class = PlayerSerializer def get(self, request, format=None): identifier = request.GET[\"id\"] if not identifier:", "snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): lobby_id =", "= [] for player_in_lobby in players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id, 'name': player.name,", "+ timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() +", "y_cord = request.data['y'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x =", "\"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): icon_name = request.data['icon_name']", "post(self, request, format=None): identifier = request.data['group_id'] if not identifier: return Response({'id': 'This field", "player_name = request.user.name is_leader = request.user.leader player_id = request.user.id group_id = request.user.group.id return", "serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request, format=None): identifier = request.data['group_id']", "snippets, or create a new snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer", "0 minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count = 0", "= Group.objects.create(name=group_name) player.group = group player.leader = True player.save() return Response({ 'player_id': request.user.id,", "request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets, or create a", "Response({'token': player.token}) def post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>() if", "django.shortcuts import render from django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from", "minigame_name = request.data['minigame_name'] group = request.user.group player = request.user player.last_connection = timezone.now() +", "class PlayerGroupView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset", "4 total_score = 0 minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max = 0", "= 0 alias_group_max = 0 alias_max = 0 alias_group_count = 0 quiklash_group_max =", "= ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max", "request.data['x'] y_cord = request.data['y'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x", "PlayerSerializer def get(self, request): identifier = request.user.group.id if not identifier: return Response({'id': 'This", "for player in players: player_type = 1 if player == request.user: player_type= 3", "Player.objects.all() serializer_class = PlayerSerializer def get(self, request, format=None): identifier = request.GET[\"id\"] if not", "return Response({'status': True}) class PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x'] y_cord =", "serializer = PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED)", "Response(serializer.data) def post(self, request, format=None): identifier = request.data['group_id'] if not identifier: return Response({'id':", "player_id = request.user.id group_id = request.user.group.id return Response({'group': {'name': group_name, 'id': group_id}, 'player':", "'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed})", "group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name':", "True player.save() return Response({ 'player_id': request.user.id, 'player_name': 
request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED)", "import Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views", "serializer_class = PlayerSerializer def get(self, request): group_name = request.user.group.name player_name = request.user.name is_leader", "0 geocache_group_max = 0 geocache_max = 0 geocache_group_count = 0 # Push the", "timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array", "new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): identifier", "group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List all", "timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id']", "return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name,", "serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List", "created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now()", "= group player.leader = True player.save() return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name':", "return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 
'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView):", "player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class", "status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets, or create a", "icon_name = request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def", "import APIView from rest_framework.response import Response from rest_framework import status from rest.maingame.channels import", "serializer_class = PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name'] group = request.user.group player", "alias_max = amg.first().current_score return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count},", "0 geocache_group_count = 0 # Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group", "gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed", "= amg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max if", "+ 1 total_score = total_score + geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score #", "lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save()", "= PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id'] lobby = 
Lobby.objects.get(id=int(lobby_id)) lobby.closed =", "identifier = request.data['group_id'] if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST)", "PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x'] y_cord = request.data['y'] player = request.user", "player_type = 2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar':", "new snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): icon_name", "Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except", "Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group = group", "= request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False)", "group = Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id,", "group = Group.objects.create(name=group_name) player.group = group player.leader = True player.save() return Response({ 'player_id':", "= timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self, request):", "HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer from rest_framework.parsers import", "= group request.user.save() players_on_game = 
Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return", "request): identifier = request.user.group.id if not identifier: return Response({'id': 'This field is required!'},", "# geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score", "= Player.objects.all() serializer_class = PlayerSerializer def post(self, request): group_name = request.data['group_name'] player =", "group = request.user.group player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby, created", "Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets, or", "serializer_class = PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed", "= total_score + geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score # alias scores amg", "required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def post(self,", "# alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score", "players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'},", "= 
request.user.name is_leader = request.user.leader player_id = request.user.id group_id = request.user.group.id return Response({'group':", "y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for player in players:", "'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request, format=None): serializer =", "alias_group_count = 0 quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count = 0 geocache_group_max", "gcmg.first(): geocache_max = gcmg.first().current_score # alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if", "from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer", "or create a new snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def", "django.utils import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models", "= 0 quiklash_group_count = 0 geocache_group_max = 0 geocache_max = 0 geocache_group_count =", "get(self, request): identifier = request.user.group.id if not identifier: return Response({'id': 'This field is", "group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets, or create a new snippet.", "the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max =", "JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer from rest_framework.parsers import JSONParser", "request.user.save() players_on_game = 
Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid", "'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets,", "'player': {'name': player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List all snippets,", "<reponame>JaliJuhola/tasks-around-tampere from django.shortcuts import render from django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import", "request, format=None): identifier = request.GET[\"id\"] if not identifier: return Response({'id': 'This field is", "group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED)", "timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for", "create a new snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self,", "return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets, or create a new", "request.GET[\"id\"] if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player", "post(self, request): group_name = request.data['group_name'] player = request.user group = Group.objects.create(name=group_name) player.group =", "lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array =", "queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name'] group", "= PlayerSerializer def get(self, request): identifier = request.user.group.id if not identifier: return Response({'id':", "= amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score", "'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count':", "request.user.icon_name = icon_name request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def post(self, request): x_cord", "class PlayerView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset", "= Player.objects.all() serializer_class = PlayerSerializer def get(self, request): identifier = request.user.group.id if not", "{'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView):", "player = request.user group = Group.objects.create(name=group_name) player.group = group player.leader = True player.save()", "class PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x'] y_cord = request.data['y'] player =", "= 4 total_score = 0 minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max =", "= <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class", "from django.utils import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from", "0 quiklash_group_count = 0 geocache_group_max = 0 geocache_max = 0 geocache_group_count = 0", "Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from rest_framework.response", "player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List all snippets, or create", "List all snippets, or create a new snippet. 
\"\"\" queryset = Player.objects.all() serializer_class", "push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash':", "if gcmg.first(): geocache_max = gcmg.first().current_score # alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score')", "request.user.group.id return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader':", "response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players':", "= PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request, format=None): identifier = request.data['group_id'] if", "def post(self, request, format=None): identifier = request.data['group_id'] if not identifier: return Response({'id': 'This", "player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord player.y =", "rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT import json from django.utils", "Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request,", "= 0 quiklash_max = 0 quiklash_group_count = 0 geocache_group_max = 0 geocache_max =", "timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in", "Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = 
ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max", "player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since", "from rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels import", "request.user.group.id if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players =", "response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed", "0 quiklash_max = 0 quiklash_group_count = 0 geocache_group_max = 0 geocache_max = 0", "push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group=", "player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request):", "Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}}) class", "= gcmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max if", "status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\"", "+ geocache_max if amg.first(): alias_max = amg.first().current_score return Response({'Push the buttons': {'group': push_the_buttons_group_max,", "'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets,", "def get(self, request): group_name = request.user.group.name player_name = request.user.name is_leader = request.user.leader player_id", "group player.leader = True player.save() return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name,", "request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status': True}) class AvatarView(APIView):", "'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List all snippets, or create a", "total_score = total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores", "player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def", "import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models import", "except Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self,", "not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer", "'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets,", "PlayerSerializer def get(self, request): group_name = request.user.group.name player_name = request.user.name 
is_leader = request.user.leader", "group_name = request.data['group_name'] player = request.user group = Group.objects.create(name=group_name) player.group = group player.leader", "players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request, format=None):", "PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors,", "field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game", "snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def post(self, request): group_name =", "gcmg.first().current_score # alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max =", "import uuid from rest.common.channels import PUSHER_CLIENT import json from django.utils import timezone from", "lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\" List all snippets, or create a", "player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord player.save() players", "amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed +", "= Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id'] lobby =", "WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT import json from django.utils import timezone", "snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): group_name =", "\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request, format=None): identifier =", "player.token}) def post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid():", "List all snippets, or create a new snippet. \"\"\" queryset = Lobby.objects.all() serializer_class", "last_connection__gte=timezone.now()) response_array = [] for player in players: player_type = 1 if player", "found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token", "from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT import json from", "0 alias_group_count = 0 quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count = 0", "ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score", "2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return", "if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed + 1", "uuid from rest.common.channels import PUSHER_CLIENT import json from django.utils import timezone from rest.push_the_buttons.models", "rest_framework.renderers import JSONRenderer from rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot, Player, Group,", "token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return 
Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "alias_max = 0 alias_group_count = 0 quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count", "minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save()", "'leader': is_leader}}) class GroupView(APIView): \"\"\" List all snippets, or create a new snippet.", "AliasMainGame class AuthView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\"", "'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group':", "players: player_type = 1 if player == request.user: player_type= 3 elif player.leader: player_type", "= 0 alias_group_count = 0 quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count =", "post(self, request): minigame_name = request.data['minigame_name'] group = request.user.group player = request.user player.last_connection =", "WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({", "def post(self, request): group_name = request.data['group_name'] player = request.user group = Group.objects.create(name=group_name) player.group", "= minigames_completed + 1 total_score = total_score + geocache_max if amg.first(): alias_max =", "#from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer from rest_framework.parsers import JSONParser from", "players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id':", "from rest_framework.renderers import JSONRenderer from 
rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot, Player,", "Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request, format=None): identifier =", "import WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT import json from django.utils import", "= total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg", "buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score", "a new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request):", "request, format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token':", "Response({'status': True}) class PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x'] y_cord = request.data['y']", "alias_group_count = amg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max", "request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets, or", "from rest.common.channels import PUSHER_CLIENT import json from django.utils import timezone from rest.push_the_buttons.models import", "in players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y,", "push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max = 0 alias_max", 
"Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def", "return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets,", "+ timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby", "= request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord", "= Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name =", "player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array", "from django.shortcuts import render from django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt", "= PlayerSerializer def post(self, request): group_name = request.data['group_name'] player = request.user group =", "'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView):", "Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world':", "gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed +", "total_score = total_score + 
geocache_max if amg.first(): alias_max = amg.first().current_score return Response({'Push the", "return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max,", "player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\"", "identifier = request.GET[\"id\"] if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST)", "player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets, or", "LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in players_in_lobby: player = player_in_lobby.player response_array.append({'id':", "amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score +", "request.user.group.name player_name = request.user.name is_leader = request.user.leader player_id = request.user.id group_id = request.user.group.id", "request.user.group player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group,", "{'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self,", "geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max", "quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count': geocache_group_count}, 'total_score':", "= Player.objects.filter(group=player.group, 
last_connection__gte=timezone.now()) response_array = [] for player in players: player_type = 1", "1 total_score = total_score + geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score # alias", "players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in players_in_lobby: player =", "Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name':", "Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20)", "request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x'] y_cord", "total_score = 0 minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count", "player.save() return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class", "'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world':", "is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game =", "= PlayerSerializer def get(self, request): group_name = request.user.group.name player_name = request.user.name is_leader =", "request): group_name = request.data['group_name'] player = request.user group = Group.objects.create(name=group_name) player.group = group", "player_in_lobby in players_in_lobby: player = player_in_lobby.player 
response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y':", "= total_score + geocache_max if amg.first(): alias_max = amg.first().current_score return Response({'Push the buttons':", "field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data)", "response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets, or create a new", "'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES = 4", "minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max if gcmg.first(): geocache_max", "serializer_class = PlayerSerializer def post(self, request): group_name = request.data['group_name'] player = request.user group", "player.x = x_cord player.y = y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array =", "request): minigame_name = request.data['minigame_name'] group = request.user.group player = request.user player.last_connection = timezone.now()", "= 1 if player == request.user: player_type= 3 elif player.leader: player_type = 2", "timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord player.save() players = Player.objects.filter(group=player.group,", "return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES = 4 total_score =", "player in players: player_type = 1 if player == request.user: player_type= 3 elif", "= ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score", "= request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby = 
Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby,", "def get(self, request): TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed = 0 push_the_buttons_group_max", "= LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id':", "player = player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id,", "\"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES = 4 total_score", "= 0 geocache_group_max = 0 geocache_max = 0 geocache_group_count = 0 # Push", "Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name'] group = request.user.group", "a new snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def post(self, request):", "= PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name'] group = request.user.group player =", "identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except", "'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message':", "\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): identifier = request.user.group.id", "quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count = 0 geocache_group_max = 0 geocache_max", "get(self, request): group_name = request.user.group.name player_name = request.user.name is_leader = request.user.leader player_id =", "'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count':", "1 total_score = total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache", "buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count':", "= Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name'] group =", "+ timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player)", "request.data['lobby_id'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player", "alias_group_max, 'world': 
alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache':", "= minigames_completed + 1 total_score = total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max =", "response_array = [] for player in players: player_type = 1 if player ==", "= 2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"})", "= Player.objects.all() serializer_class = PlayerSerializer def get(self, request, format=None): identifier = request.GET[\"id\"] if", "player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all", "= 0 alias_max = 0 alias_group_count = 0 quiklash_group_max = 0 quiklash_max =", "request): TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed = 0 push_the_buttons_group_max = 0", "required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not found'},", "= ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed", "PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List", "rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT", "'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets, or create", "request): icon_name = request.data['icon_name'] request.user.icon_name = 
icon_name request.user.save() return Response({'status': True}) class PlayerLocationView(APIView):", "GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List all snippets, or create", "post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return", "timezone.now() + timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby,", "format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token},", "gcmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max if gcmg.first():", "from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from rest_framework.response import Response", "return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView):", "snippets, or create a new snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer", "import AliasMainGame class AuthView(APIView): \"\"\" List all snippets, or create a new snippet.", "lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id'] player", "post(self, request): x_cord = request.data['x'] y_cord = request.data['y'] player = request.user player.last_connection =", "= PlayerSerializer def get(self, request, format=None): identifier = request.GET[\"id\"] if not identifier: return", "new snippet. 
\"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): lobby_id", "return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request, format=None):", "= request.user.group.id if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players", "def get(self, request, format=None): identifier = request.GET[\"id\"] if not identifier: return Response({'id': 'This", "'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group = group request.user.save()", "AuthView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\" queryset =", "True lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\" List all snippets, or create", "return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group =", "= x_cord player.y = y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = []", "\"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name']", "= Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name)", "\"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id']", "# Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first():", "icon_name 
request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x']", "except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name,", "post(self, request): lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return", "import render from django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers", "status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST)", "queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): group_name = request.user.group.name player_name", "'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world':", "== request.user: player_type= 3 elif player.leader: player_type = 2 response_array.append({'name': player.name, 'type': player_type,", "class AuthView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset", "geocache_max = 0 geocache_group_count = 0 # Push the buttons scores ptbmg =", "= GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed", "return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist:", "is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def", "request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist:", "= request.data['group_id'] if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try:", "import JSONRenderer from rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot, Player, Group, Lobby,", "'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets, or create a new", "Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView", "rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from rest.maingame.channels", "scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count =", "field is required!'}, 
status=status.HTTP_400_BAD_REQUEST) try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player", "0 geocache_max = 0 geocache_group_count = 0 # Push the buttons scores ptbmg", "rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List all snippets,", "Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from", "status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token =", "= 0 push_the_buttons_group_count = 0 alias_group_max = 0 alias_max = 0 alias_group_count =", "is_leader = request.user.leader player_id = request.user.id group_id = request.user.group.id return Response({'group': {'name': group_name,", "push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + push_the_buttons_group_max", "from rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List all", "not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request, format=None): serializer = PlayerSerializer(data=request.data)", "response_array = [] for player_in_lobby in players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id, 'name':", "timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20)", "rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from rest_framework.response import Response from", "player = request.user player.last_connection = timezone.now() + 
timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name,", "player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in players_in_lobby: player", "'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets, or create a new snippet.", "import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from rest_framework.response import Response from rest_framework", "create a new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def post(self,", "= icon_name request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def post(self, request): x_cord =", "0 quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count = 0 geocache_group_max = 0", "try: player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return", "= timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord player.y = y_cord player.save() players =", "in players: player_type = 1 if player == request.user: player_type= 3 elif player.leader:", "True}) class PlayerLocationView(APIView): def post(self, request): x_cord = request.data['x'] y_cord = request.data['y'] player", "a new snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request):", "request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets, or", "GroupView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset =", "if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: player =", "= Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request, format=None): identifier", "lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save()", "amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed", "player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for player in players: player_type", "= 0 quiklash_group_max = 0 quiklash_max = 0 quiklash_group_count = 0 geocache_group_max =", "request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player,", "request.user: player_type= 3 elif player.leader: player_type = 2 response_array.append({'name': player.name, 'type': player_type, 'location':", "+ 1 total_score = total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score #", "alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max,", "geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score # alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group=", "player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) 
lobby_player =", "if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\"", "return Response({'token': player.token}) def post(self, request, format=None): serializer = PlayerSerializer(data=request.data) token = <KEY>()", "serializer_class = PlayerSerializer def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name = icon_name request.user.save()", "'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array,", "minigames_completed + 1 total_score = total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score", "token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets, or create", "request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all", "created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return", "request): lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status':", "status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request,", "{'name': player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List all snippets, or", "amg.first().current_score return Response({'Push the buttons': {'group': 
push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group':", "request): x_cord = request.data['x'] y_cord = request.data['y'] player = request.user player.last_connection = timezone.now()", "player.leader: player_type = 2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y},", "many=True) return Response(serializer.data) def post(self, request, format=None): identifier = request.data['group_id'] if not identifier:", "LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id})", "rest.common.channels import PUSHER_CLIENT import json from django.utils import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame", "quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count': geocache_group_count}, 'total_score': total_score, 'completion_percentage':", "geocache_max if amg.first(): alias_max = amg.first().current_score return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world':", "alias_group_max = 0 alias_max = 0 alias_group_count = 0 quiklash_group_max = 0 quiklash_max", "Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\"", "geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed + 1 total_score =", "PlayerSerializer def get(self, request, format=None): identifier = request.GET[\"id\"] if not identifier: return Response({'id':", "from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List all snippets, or create a", 
"identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer =", "status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name,", "\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): group_name = request.user.group.name", "lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id'] player =", "Group.objects.create(name=group_name) player.group = group player.leader = True player.save() return Response({ 'player_id': request.user.id, 'player_name':", "+ geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score # alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score')", "render from django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import", "lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status': True})", "alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count", "closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save()", "return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}})", "amg_group.first(): alias_group_max = 
amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed + 1 total_score", "LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now())", "lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\"", "lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby =", "quiklash_max = 0 quiklash_group_count = 0 geocache_group_max = 0 geocache_max = 0 geocache_group_count", "def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return Response({'status': True})", "PlayerView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\" queryset =", "request.data['group_name'] player = request.user group = Group.objects.create(name=group_name) player.group = group player.leader = True", "if player == request.user: player_type= 3 elif player.leader: player_type = 2 response_array.append({'name': player.name,", "player.y = y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for player", "+ 1 total_score = total_score + geocache_max if amg.first(): alias_max = amg.first().current_score return", "or create a new snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def", "def post(self, request): lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save()", "ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score +", "request): group_name = request.user.group.name player_name = request.user.name is_leader = request.user.leader player_id = request.user.id", "rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView):", "total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg =", "django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer from", "status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT import json", "player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\" List all snippets, or create a new", "= Player.objects.all() serializer_class = PlayerSerializer def get(self, request): group_name = request.user.group.name player_name =", "quiklash_group_count = 0 geocache_group_max = 0 geocache_max = 0 geocache_group_count = 0 #", "= 0 push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max = 0 alias_max =", "{'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count},", "amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score 
alias_group_count = amg_group.count()", "x_cord player.y = y_cord player.save() players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for", "<KEY>() if serializer.is_valid(): serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView):", "= request.user.leader player_id = request.user.id group_id = request.user.group.id return Response({'group': {'name': group_name, 'id':", "True}) class AvatarView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\"", "= timezone.now() + timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created =", "PlayerSerializer def post(self, request): lobby_id = request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True", "= request.user group = Group.objects.create(name=group_name) player.group = group player.leader = True player.save() return", "timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id", "[] for player in players: player_type = 1 if player == request.user: player_type=", "{'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count},", "1 if player == request.user: player_type= 3 elif player.leader: player_type = 2 response_array.append({'name':", "push_the_buttons_group_count = 0 alias_group_max = 0 alias_max = 0 alias_group_count = 0 quiklash_group_max", "snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): identifier =", "Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import", "if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed + 1", "= PlayerSerializer def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return", "lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since =", "= request.user.group player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby, created =", "csrf_exempt from rest_framework.renderers import JSONRenderer from rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot,", "+ push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score')", "serializer_class = PlayerSerializer def get(self, request): identifier = request.user.group.id if not identifier: return", "post(self, request): icon_name = request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return Response({'status': True}) class", "APIView from rest_framework.response import Response from rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels", "request.user.name is_leader = request.user.leader player_id = request.user.id group_id = request.user.group.id return Response({'group': {'name':", "HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from rest_framework.response 
import Response from rest_framework import", "def patch(self, request): lobby_id = request.data['lobby_id'] player = request.user player.last_connection = timezone.now() +", "timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame", "import JSONParser from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import", "not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier)", "\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def post(self, request): group_name = request.data['group_name']", "queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): identifier = request.user.group.id if", "def post(self, request): x_cord = request.data['x'] y_cord = request.data['y'] player = request.user player.last_connection", "push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max,", "'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count': geocache_group_count}, 'total_score': total_score, 'completion_percentage': minigames_completed/TOTAL_MINIGAMES})", "'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id},", "= request.user.group.id return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id,", "gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): 
geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count()", "ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed +", "timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now()", "request.data['group_id'] if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group", "0 push_the_buttons_group_count = 0 alias_group_max = 0 alias_max = 0 alias_group_count = 0", "alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max,", "Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for player in players: player_type = 1 if", "'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List", "total_score + geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score # alias scores amg =", "Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets, or create a", "for player_in_lobby in players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x': player.x,", "identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group", "import status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels import PUSHER_CLIENT 
import", "= request.data['lobby_id'] lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status': True}) class", "= 0 # Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score')", "LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from rest_framework.views import APIView from rest_framework.response import", "request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player)", "player = Player.objects.get(id=identifier) except Player.DoesNotExist: return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token':", "patch(self, request): lobby_id = request.data['lobby_id'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20)", "'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets, or create", "geocache_group_max = 0 geocache_max = 0 geocache_group_count = 0 # Push the buttons", "rest_framework.response import Response from rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid", "all snippets, or create a new snippet. 
\"\"\" queryset = Player.objects.all() serializer_class =", "'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class", "gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score +", "import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer from rest_framework.parsers", "ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed =", "push_the_buttons_max = ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first():", "= Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\" List", "= [] for player in players: player_type = 1 if player == request.user:", "players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now()) response_array = [] for player in players: player_type =", "= AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed", "request.data['minigame_name'] group = request.user.group player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby,", "request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id}, 
status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all", "return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players,", "= True lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\" List all snippets, or", "def post(self, request): minigame_name = request.data['minigame_name'] group = request.user.group player = request.user player.last_connection", "AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max = amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed =", "scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count =", "from rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer from", "snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): minigame_name =", "0 push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max = 0 alias_max = 0", "the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max,", "3 elif player.leader: player_type = 2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x,", "serializer.save(token=token) return Response({'token': token}, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all", "create a new snippet. 
\"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self,", "joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id,", "total_score + geocache_max if amg.first(): alias_max = amg.first().current_score return Response({'Push the buttons': {'group':", "new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request): group_name", "lobby_id = request.data['lobby_id'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) lobby =", "= amg.first().current_score return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias':", "Response from rest_framework import status from rest.maingame.channels import WaitingPlayersToJoinChannels import uuid from rest.common.channels", "class LobbyView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\" queryset", "LobbyExitView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\" queryset =", "Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name = icon_name", "queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name", "\"\"\" List all snippets, or create a new snippet. \"\"\" queryset = Player.objects.all()", "push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count = ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score =", "LobbyView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset =", "= 0 geocache_group_count = 0 # Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score')", "JSONRenderer from rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer", "JSONParser from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer,", "get(self, request): TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed = 0 push_the_buttons_group_max =", "ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count =", "minigames_completed = minigames_completed + 1 total_score = total_score + push_the_buttons_group_max if ptbmg.first(): push_the_buttons_max", "player.group = group player.leader = True player.save() return Response({ 'player_id': request.user.id, 'player_name': request.user.name,", "player_type = 1 if player == request.user: player_type= 3 elif player.leader: player_type =", "0 alias_group_max = 0 alias_max = 0 alias_group_count = 0 quiklash_group_max = 0", "Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id':", "Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id'] player = request.user player.last_connection =", "minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max", "group_id = request.user.group.id return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id':", "MinigameProgressionView(APIView): def get(self, 
request): TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed = 0", "def get(self, request): identifier = request.user.group.id if not identifier: return Response({'id': 'This field", "required!'}, status=status.HTTP_400_BAD_REQUEST) try: group = Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count()", "new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request, format=None):", "try: group = Group.objects.get(id=identifier) request.user.group = group request.user.save() players_on_game = Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game,", "ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + push_the_buttons_group_max if ptbmg.first():", "0 alias_max = 0 alias_group_count = 0 quiklash_group_max = 0 quiklash_max = 0", "push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max,", "format=None): identifier = request.data['group_id'] if not identifier: return Response({'id': 'This field is required!'},", "player_type= 3 elif player.leader: player_type = 2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude':", "Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST) return Response({'token': player.token}) def post(self, request, format=None): serializer", "Player.objects.all() serializer_class = PlayerSerializer def post(self, request): group_name = request.data['group_name'] player = request.user", "player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView):", "snippet. 
\"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request, format=None): identifier", "Player.objects.all() serializer_class = PlayerSerializer def get(self, request): identifier = request.user.group.id if not identifier:", "is_leader}}) class GroupView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\"", "0 push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max = 0", "queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request, format=None): identifier = request.GET[\"id\"]", "return Response({'players': response_array, 'closed': lobby.closed}) class LobbyExitView(APIView): \"\"\" List all snippets, or create", "geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count", "Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) players = Player.objects.filter(group__id=identifier) serializer = PlayerSerializer(players, many=True)", "return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id'] player = request.user player.last_connection", "[] for player_in_lobby in players_in_lobby: player = player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x':", "'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group':", "request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets, or create a new snippet.", "elif player.leader: player_type = 2 response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude':", "if amg.first(): 
alias_max = amg.first().current_score return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max,", "Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby", "total_score = total_score + geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score # alias scores", "lobby = Lobby.objects.get(id=int(lobby_id)) lobby.closed = True lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\"", "scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if ptbmg_group.first(): push_the_buttons_group_max = ptbmg_group.first().current_score push_the_buttons_group_count", "= LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby,", "= minigames_completed + 1 total_score = total_score + geocache_max if gcmg.first(): geocache_max =", "status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class", "'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES", "lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in players_in_lobby:", "django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer from 
rest_framework.parsers import JSONParser from rest.maingame.models", "= timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id)) lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player) lobby_player.joined_since =", "PlayerSerializer def post(self, request): icon_name = request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return Response({'status':", "request.user.leader player_id = request.user.id group_id = request.user.group.id return Response({'group': {'name': group_name, 'id': group_id},", "= player_in_lobby.player response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar':", "import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\"", "from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import GeocacheMainGame from rest.alias.models import AliasMainGame class", "minigames_completed + 1 total_score = total_score + geocache_max if gcmg.first(): geocache_max = gcmg.first().current_score", "rest_framework.parsers import JSONParser from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers", "identifier = request.user.group.id if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST)", "import PUSHER_CLIENT import json from django.utils import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from", "GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed =", "request.data['y'] player = request.user player.last_connection = timezone.now() + 
timezone.timedelta(seconds=20) player.x = x_cord player.y", "TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max", "x_cord = request.data['x'] y_cord = request.data['y'] player = request.user player.last_connection = timezone.now() +", "get(self, request, format=None): identifier = request.GET[\"id\"] if not identifier: return Response({'id': 'This field", "Player.objects.all() serializer_class = PlayerSerializer def get(self, request): group_name = request.user.group.name player_name = request.user.name", "Response({'status': True}) class AvatarView(APIView): \"\"\" List all snippets, or create a new snippet.", "= LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = [] for player_in_lobby in players_in_lobby: player = player_in_lobby.player", "import csrf_exempt from rest_framework.renderers import JSONRenderer from rest_framework.parsers import JSONParser from rest.maingame.models import", "import json from django.utils import timezone from rest.push_the_buttons.models import PushTheButtonsMainGame from rest.geocache.models import", "status=status.HTTP_400_BAD_REQUEST) class PlayerGroupView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\"", "from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from", "= timezone.now() + timezone.timedelta(seconds=20) lobby_player.save() player.save() players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now()) response_array = []", "player == request.user: player_type= 3 elif player.leader: player_type = 2 response_array.append({'name': player.name, 'type':", "'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count': geocache_group_count}, 'total_score': total_score,", "gcmg_group.first(): geocache_group_max = gcmg_group.first().current_score geocache_group_count = gcmg_group.count() minigames_completed = minigames_completed + 1 total_score", "amg.first(): alias_max = amg.first().current_score return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count':", "new snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): minigame_name", "timezone.timedelta(seconds=20) lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False) lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player) lobby_player.joined_since", "alias_group_max = amg_group.first().current_score alias_group_count = amg_group.count() minigames_completed = minigames_completed + 1 total_score =", "minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max if amg.first(): alias_max", "snippet. \"\"\" queryset = Lobby.objects.all() serializer_class = PlayerSerializer def post(self, request): icon_name =", "PlayerSerializer from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status", "PlayerGroupView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset =", "request.user group = Group.objects.create(name=group_name) player.group = group player.leader = True player.save() return Response({", "{'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count': geocache_group_count},", "a new snippet. \"\"\" queryset = Player.objects.all() serializer_class = PlayerSerializer def get(self, request,", "\"\"\" List all snippets, or create a new snippet. \"\"\" queryset = Lobby.objects.all()", "player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES =", "'group_id': request.user.group.id}, status=status.HTTP_201_CREATED) class PlayerView(APIView): \"\"\" List all snippets, or create a new", "Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return", "PlayerSerializer(players, many=True) return Response(serializer.data) def post(self, request, format=None): identifier = request.data['group_id'] if not", "= request.data['x'] y_cord = request.data['y'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20)", "minigames_completed + 1 total_score = total_score + geocache_max if amg.first(): alias_max = amg.first().current_score", "'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': \"../assets/testmarker.png\"}) return Response({'players': response_array}) class MinigameProgressionView(APIView): def", "+ timezone.timedelta(seconds=20) lobby_player.save() lobby.save() player.save() return Response({'lobby_id': lobby.id}) def patch(self, request): lobby_id =", "= request.data['lobby_id'] player = request.user player.last_connection = 
timezone.now() + timezone.timedelta(seconds=20) lobby = Lobby.objects.get(id=int(lobby_id))", "group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST) return Response({ 'player_id': request.user.id,", "status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List all snippets, or create a new snippet. \"\"\"", "ptbmg.first().current_score # geocache scores gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score') gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score') if gcmg_group.first(): geocache_group_max =", "from django.http import HttpResponse, JsonResponse #from django.views.decorators.csrf import csrf_exempt from rest_framework.renderers import JSONRenderer", "= request.GET[\"id\"] if not identifier: return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST) try:", "lobby.closed = True lobby.save() return Response({'status': True}) class AvatarView(APIView): \"\"\" List all snippets,", "0 # Push the buttons scores ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score') ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score') if", "return Response(serializer.data) def post(self, request, format=None): identifier = request.data['group_id'] if not identifier: return", "import GeocacheMainGame from rest.alias.models import AliasMainGame class AuthView(APIView): \"\"\" List all snippets, or", "Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\"", "player.leader = True player.save() return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id':", "class AvatarView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset", "= request.data['y'] player = request.user player.last_connection = timezone.now() + timezone.timedelta(seconds=20) player.x = x_cord", "PlayerSerializer def post(self, request): minigame_name = request.data['minigame_name'] group = request.user.group player = request.user", "= gcmg.first().current_score # alias scores amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score') amg_group= amg.filter(group=request.user.group).order_by('-current_score') if amg_group.first(): alias_group_max", "push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max = 0 alias_max = 0 alias_group_count", "lobby.id}) def patch(self, request): lobby_id = request.data['lobby_id'] player = request.user player.last_connection = timezone.now()", "class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES = 4 total_score = 0 minigames_completed =", "= 0 minigames_completed = 0 push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count =", "= Player.objects.filter(group=group).count() WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name) except Group.DoesNotExist: return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST)", "rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer from rest.maingame.serializers import HotspotSerializer, PlayerSerializer from", "class LobbyExitView(APIView): \"\"\" List all snippets, or create a new snippet. 
\"\"\" queryset", "Response({'players': response_array}) class MinigameProgressionView(APIView): def get(self, request): TOTAL_MINIGAMES = 4 total_score = 0", "'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED) class LobbyView(APIView): \"\"\" List", "player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name}) return Response({'players':", "group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}}) class GroupView(APIView): \"\"\"", "request.data['icon_name'] request.user.icon_name = icon_name request.user.save() return Response({'status': True}) class PlayerLocationView(APIView): def post(self, request):", "= ptbmg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + push_the_buttons_group_max if", "amg_group.count() minigames_completed = minigames_completed + 1 total_score = total_score + geocache_max if amg.first():", "= 0 push_the_buttons_group_max = 0 push_the_buttons_max = 0 push_the_buttons_group_count = 0 alias_group_max =", "= request.data['group_name'] player = request.user group = Group.objects.create(name=group_name) player.group = group player.leader =", "= 0 geocache_max = 0 geocache_group_count = 0 # Push the buttons scores" ]
[ "gTTS from pydub import AudioSegment import hashlib try: from urllib.parse import unquote_plus except:", "= os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang = lang.lower() text = unquote_plus(text) tts", "AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename)", "Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang = lang.lower() text", "= gTTS(text=text, lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound", "import os from config import Config from flask import Flask, send_from_directory from werkzeug.contrib.fixers", "= sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ ==", "production port = int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug = False app.wsgi_app", "import hashlib try: from urllib.parse import unquote_plus except: from urllib import unquote_plus config", "os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename,", "ProxyFix import logging from gtts import gTTS from pydub import AudioSegment import hashlib", "gtts import gTTS from pydub import AudioSegment import hashlib try: from urllib.parse import", "from pydub import AudioSegment import hashlib try: from 
urllib.parse import unquote_plus except: from", "import logging from gtts import gTTS from pydub import AudioSegment import hashlib try:", "sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': #", "config.debug_mode to False in production port = int(os.environ.get(\"PORT\", config.port)) if port != config.port:", "send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\",", "unquote_plus(text) tts = gTTS(text=text, lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename)", "unquote_plus config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def", "tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000'])", "return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': # Be sure to set config.debug_mode", "text): lang = lang.lower() text = unquote_plus(text) tts = gTTS(text=text, lang=lang) filename =", "tts = gTTS(text=text, lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename)", "set config.debug_mode to False in production port = int(os.environ.get(\"PORT\", config.port)) if port !=", "os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang 
= lang.lower() text = unquote_plus(text) tts =", "return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\",", "filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3')", "from urllib import unquote_plus config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR =", "int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug = False app.wsgi_app = ProxyFix(app.wsgi_app) app.run(host='0.0.0.0',", "logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang = lang.lower() text =", "send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': # Be sure to set config.debug_mode to", "Be sure to set config.debug_mode to False in production port = int(os.environ.get(\"PORT\", config.port))", "sure to set config.debug_mode to False in production port = int(os.environ.get(\"PORT\", config.port)) if", "import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging from gtts import gTTS", "from config import Config from flask import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix", "'16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': # Be sure to set", "if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0)", "gTTS(text=text, lang=lang) filename = 
lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound =", "bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': # Be sure", "send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging from gtts import gTTS from pydub", "generate(lang, text): lang = lang.lower() text = unquote_plus(text) tts = gTTS(text=text, lang=lang) filename", "from flask import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging from gtts", "lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound =", "to False in production port = int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug", "def generate(lang, text): lang = lang.lower() text = unquote_plus(text) tts = gTTS(text=text, lang=lang)", "Config from flask import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging from", "flask import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging from gtts import", "import ProxyFix import logging from gtts import gTTS from pydub import AudioSegment import", "format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': # Be", "in production port = int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug = False", "try: from urllib.parse import unquote_plus except: from urllib import unquote_plus config = Config()", "coding: utf-8 -*- import os from config import Config from flask import Flask,", "from werkzeug.contrib.fixers 
import ProxyFix import logging from gtts import gTTS from pydub import", "sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__", "== '__main__': # Be sure to set config.debug_mode to False in production port", "Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging from gtts import gTTS from", "format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if", "-*- import os from config import Config from flask import Flask, send_from_directory from", "= Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text):", "to set config.debug_mode to False in production port = int(os.environ.get(\"PORT\", config.port)) if port", "= lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound", "filename) if __name__ == '__main__': # Be sure to set config.debug_mode to False", "from gtts import gTTS from pydub import AudioSegment import hashlib try: from urllib.parse", "import AudioSegment import hashlib try: from urllib.parse import unquote_plus except: from urllib import", "= AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR,", "lang.lower() text = unquote_plus(text) tts = gTTS(text=text, lang=lang) filename = 
lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename):", "urllib.parse import unquote_plus except: from urllib import unquote_plus config = Config() app =", "@app.route('/generate/<lang>/<text>') def generate(lang, text): lang = lang.lower() text = unquote_plus(text) tts = gTTS(text=text,", "'__main__': # Be sure to set config.debug_mode to False in production port =", "= Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang = lang.lower()", "text = unquote_plus(text) tts = gTTS(text=text, lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return", "urllib import unquote_plus config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR']", "import gTTS from pydub import AudioSegment import hashlib try: from urllib.parse import unquote_plus", "lang = lang.lower() text = unquote_plus(text) tts = gTTS(text=text, lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3'", "import Config from flask import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import logging", "config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang,", "AudioSegment import hashlib try: from urllib.parse import unquote_plus except: from urllib import unquote_plus", "logging from gtts import gTTS from pydub import AudioSegment import hashlib try: from", "from urllib.parse import unquote_plus except: from urllib import unquote_plus config = Config() app", "= unquote_plus(text) tts = gTTS(text=text, lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if 
os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR,", "filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar',", "unquote_plus except: from urllib import unquote_plus config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG)", "os from config import Config from flask import Flask, send_from_directory from werkzeug.contrib.fixers import", "import unquote_plus except: from urllib import unquote_plus config = Config() app = Flask(__name__)", "lang=lang) filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if os.path.isfile(STORAGE_DIR+filename): return send_from_directory(STORAGE_DIR, filename) tts.save(STORAGE_DIR+filename) sound = AudioSegment.from_file(STORAGE_DIR+filename,", "-*- coding: utf-8 -*- import os from config import Config from flask import", "Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang", "port = int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug = False app.wsgi_app =", "# Be sure to set config.debug_mode to False in production port = int(os.environ.get(\"PORT\",", "utf-8 -*- import os from config import Config from flask import Flask, send_from_directory", "False in production port = int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug =", "config import Config from flask import Flask, send_from_directory from werkzeug.contrib.fixers import ProxyFix import", "except: from urllib import unquote_plus config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR", "sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3') sound = 
sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return", "__name__ == '__main__': # Be sure to set config.debug_mode to False in production", "STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang = lang.lower() text = unquote_plus(text)", "if __name__ == '__main__': # Be sure to set config.debug_mode to False in", "# -*- coding: utf-8 -*- import os from config import Config from flask", "import unquote_plus config = Config() app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>')", "app = Flask(__name__) logging.getLogger('flask_tts').setLevel(logging.DEBUG) STORAGE_DIR = os.environ['STORAGE_DIR'] @app.route('/generate/<lang>/<text>') def generate(lang, text): lang =", "config.port)) if port != config.port: config.debug = False app.wsgi_app = ProxyFix(app.wsgi_app) app.run(host='0.0.0.0', debug=config.debug_mode,", "if port != config.port: config.debug = False app.wsgi_app = ProxyFix(app.wsgi_app) app.run(host='0.0.0.0', debug=config.debug_mode, port=port)", "hashlib try: from urllib.parse import unquote_plus except: from urllib import unquote_plus config =", "pydub import AudioSegment import hashlib try: from urllib.parse import unquote_plus except: from urllib", "sound.apply_gain(+8.0) sound.export(STORAGE_DIR+filename, format=\"mp3\", bitrate=\"48k\", parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__':", "parameters=['-ac','2','-ar', '16000']) return send_from_directory(STORAGE_DIR, filename) if __name__ == '__main__': # Be sure to", "= int(os.environ.get(\"PORT\", config.port)) if port != config.port: config.debug = False app.wsgi_app = ProxyFix(app.wsgi_app)", "= lang.lower() text = unquote_plus(text) tts = gTTS(text=text, lang=lang) filename = 
lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3' if", "werkzeug.contrib.fixers import ProxyFix import logging from gtts import gTTS from pydub import AudioSegment" ]
[ "to discretize 3-dimensional spaces in regions and facilitate the retrieve by regions or", "groups functions to discretize 3-dimensional spaces in regions and facilitate the retrieve by", "regions and facilitate the retrieve by regions or define neighbourhood with fixed regions.", "and facilitate the retrieve by regions or define neighbourhood with fixed regions. \"\"\"", "================= Space discretization module groups functions to discretize 3-dimensional spaces in regions and", "module groups functions to discretize 3-dimensional spaces in regions and facilitate the retrieve", "\"\"\" 3D discretization ================= Space discretization module groups functions to discretize 3-dimensional spaces", "functions to discretize 3-dimensional spaces in regions and facilitate the retrieve by regions", "discretization ================= Space discretization module groups functions to discretize 3-dimensional spaces in regions", "3-dimensional spaces in regions and facilitate the retrieve by regions or define neighbourhood", "Space discretization module groups functions to discretize 3-dimensional spaces in regions and facilitate", "in regions and facilitate the retrieve by regions or define neighbourhood with fixed", "3D discretization ================= Space discretization module groups functions to discretize 3-dimensional spaces in", "discretization module groups functions to discretize 3-dimensional spaces in regions and facilitate the", "spaces in regions and facilitate the retrieve by regions or define neighbourhood with", "discretize 3-dimensional spaces in regions and facilitate the retrieve by regions or define" ]
[ "as results: rdr = csv.reader(results) outcomes = (float(row[10]) for row in rdr) first", "for Mastering Object-Oriented Python 2nd Edition Chapter 14. Example 1 -- simulation model.", "AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self) ->", "sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = { 1 -", "BettingStrategy max_rounds: int init_stake: int rounds: int = field(init=False) stake: float = field(init=False)", "int] class PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class", "A mock simulation which is built from the above mock objects. import random", "@dataclass class Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int rounds: int", "in outcomes: sum_0 += 1 # value**0 sum_1 += value # value**1 value_min", "mocks. class DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft", "range(self.samples): self.player.reset() while self.player.stake > 0 and self.player.rounds > 0: self.player.rounds -= 1", "file can be read. 
:param path: Path to the example output \"\"\" with", "payout: Tuple[int, int] class PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy):", "if outcome < 0.579: self.player.stake -= 1 elif 0.579 <= outcome < 0.883:", "Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass class", "self.stake = self.init_stake # A mock simulation which is built from the above", "# Mock Object Model # ===================== # A set of class hierarchies that", "pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card only to", "= random.random() if outcome < 0.579: self.player.stake -= 1 elif 0.579 <= outcome", "pathlib import Path import csv # Mock Object Model # ===================== # A", "self.table.payout blackjack_payout = x / y for count in range(self.samples): self.player.reset() while self.player.stake", "def record_loss(self) -> None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy):", "resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card only to aces; no resplit.\"\"\" pass @dataclass", "reset(self) -> None: self.rounds = self.max_rounds self.stake = self.init_stake # A mock simulation", "Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy betting: BettingStrategy max_rounds:", "max_rounds: int init_stake: int rounds: int = field(init=False) stake: float = field(init=False) def", "min(value_min, value) value_max = max(value_max, value) mean = sum_1 / sum_0 print( f\"{path}\\nMean", "+= blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path: Path) -> None: \"\"\" Validate", "Edition Chapter 14. Example 1 -- simulation model. 
\"\"\" from dataclasses import dataclass,", "-> None: self.rounds = self.max_rounds self.stake = self.init_stake # A mock simulation which", "Iterator from pathlib import Path import csv # Mock Object Model # =====================", "only to aces; no resplit.\"\"\" pass @dataclass class Table: decks: int limit: int", "to aces; no resplit.\"\"\" pass @dataclass class Table: decks: int limit: int dealer:", "class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass class SplitRule: def __repr__(self) -> str:", "field(init=False) stake: float = field(init=False) def __post_init__(self): self.reset() def reset(self) -> None: self.rounds", "simulation.\"\"\" table: Table player: Player samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical", "hierarchies that we'll use for several examples. # The content is mostly mocks.", "int: raise NotImplementedError(\"No bet method\") def record_win(self) -> None: pass def record_loss(self) ->", "def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class", "Validate unit test result file can be read. :param path: Path to the", "stake: float = field(init=False) def __post_init__(self): self.reset() def reset(self) -> None: self.rounds =", "self.rounds = self.max_rounds self.stake = self.init_stake # A mock simulation which is built", "decks: int limit: int dealer: DealerRule split: SplitRule payout: Tuple[int, int] class PlayerStrategy:", "def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout blackjack_payout =", "value_min = value_max = first for value in outcomes: sum_0 += 1 #", "Object Model # ===================== # A set of class hierarchies that we'll use", "sum_1 += value # value**1 value_min = min(value_min, value) value_max = max(value_max, value)", "from the above mock objects. 
import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table:", "rdr = csv.reader(results) outcomes = (float(row[10]) for row in rdr) first = next(outcomes)", "<= outcome < 0.943: # a \"push\" pass else: # 0.943 <= outcome", "= max(value_max, value) mean = sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House", "typing import Tuple, Iterator from pathlib import Path import csv # Mock Object", "outcome = random.random() if outcome < 0.579: self.player.stake -= 1 elif 0.579 <=", "asdict, field from typing import Tuple, Iterator from pathlib import Path import csv", "str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on", "can be read. :param path: Path to the example output \"\"\" with path.open(\"r\")", "\"\"\" Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python 2nd Edition", "self.player.rounds -= 1 outcome = random.random() if outcome < 0.579: self.player.stake -= 1", "value**1 value_min = min(value_min, value) value_max = max(value_max, value) mean = sum_1 /", "class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self)", "resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card", "bet(self) -> int: raise NotImplementedError(\"No bet method\") def record_win(self) -> None: pass def", "Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter 14. 
Example 1 --", "@dataclass class Table: decks: int limit: int dealer: DealerRule split: SplitRule payout: Tuple[int,", "= (float(row[10]) for row in rdr) first = next(outcomes) sum_0, sum_1 = 1,", "raise NotImplementedError(\"No bet method\") def record_win(self) -> None: pass def record_loss(self) -> None:", "__iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout blackjack_payout = x", "The content is mostly mocks. class DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\"", "pass class BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self) -> int:", "a \"push\" pass else: # 0.943 <= outcome self.player.stake += blackjack_payout yield astuple(self.table)", "init_stake: int rounds: int = field(init=False) stake: float = field(init=False) def __post_init__(self): self.reset()", "pass def record_loss(self) -> None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class", "csv # Mock Object Model # ===================== # A set of class hierarchies", "pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def", "is mostly mocks. class DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule):", "None: pass def record_loss(self) -> None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass", "0: self.player.rounds -= 1 outcome = random.random() if outcome < 0.579: self.player.stake -=", "0 and self.player.rounds > 0: self.player.rounds -= 1 outcome = random.random() if outcome", "objects. 
import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player samples:", "pass class NoReSplitAces(SplitRule): \"\"\"One card only to aces; no resplit.\"\"\" pass @dataclass class", "= self.table.payout blackjack_payout = x / y for count in range(self.samples): self.player.reset() while", "y = self.table.payout blackjack_payout = x / y for count in range(self.samples): self.player.reset()", "None: self.rounds = self.max_rounds self.stake = self.init_stake # A mock simulation which is", "value**0 sum_1 += value # value**1 value_min = min(value_min, value) value_max = max(value_max,", "several examples. # The content is mostly mocks. class DealerRule: def __repr__(self) ->", "\"\"\"One card only to aces; no resplit.\"\"\" pass @dataclass class Table: decks: int", "method\") def record_win(self) -> None: pass def record_loss(self) -> None: pass class Flat(BettingStrategy):", "no resplit.\"\"\" pass @dataclass class Table: decks: int limit: int dealer: DealerRule split:", "Python 2e Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter 14. Example", "None: \"\"\" Validate unit test result file can be read. :param path: Path", "outcome self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path: Path) -> None:", "= self.init_stake # A mock simulation which is built from the above mock", "+= value # value**1 value_min = min(value_min, value) value_max = max(value_max, value) mean", "betting: BettingStrategy max_rounds: int init_stake: int rounds: int = field(init=False) stake: float =", "mock objects. 
import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player", "x, y = self.table.payout blackjack_payout = x / y for count in range(self.samples):", "class Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int rounds: int =", "def __post_init__(self): self.reset() def reset(self) -> None: self.rounds = self.max_rounds self.stake = self.init_stake", "value # value**1 value_min = min(value_min, value) value_max = max(value_max, value) mean =", "from pathlib import Path import csv # Mock Object Model # ===================== #", "field(init=False) def __post_init__(self): self.reset() def reset(self) -> None: self.rounds = self.max_rounds self.stake =", "-> None: pass def record_loss(self) -> None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy):", "for row in rdr) first = next(outcomes) sum_0, sum_1 = 1, first value_min", "astuple, asdict, field from typing import Tuple, Iterator from pathlib import Path import", "split: SplitRule payout: Tuple[int, int] class PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\"", "NoReSplitAces(SplitRule): \"\"\"One card only to aces; no resplit.\"\"\" pass @dataclass class Table: decks:", "output \"\"\" with path.open(\"r\") as results: rdr = csv.reader(results) outcomes = (float(row[10]) for", "first value_min = value_max = first for value in outcomes: sum_0 += 1", "= min(value_min, value) value_max = max(value_max, value) mean = sum_1 / sum_0 print(", "def bet(self) -> int: raise NotImplementedError(\"No bet method\") def record_win(self) -> None: pass", "# value**1 value_min = min(value_min, value) value_max = max(value_max, value) mean = sum_1", "Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass class SplitRule: def __repr__(self) -> str: return", "play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int rounds: int = field(init=False) stake:", "random.random() if outcome < 
0.579: self.player.stake -= 1 elif 0.579 <= outcome <", "the example output \"\"\" with path.open(\"r\") as results: rdr = csv.reader(results) outcomes =", "mean = sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = {", "print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = { 1 - mean / 50:.1%}\\n\"", "Table: decks: int limit: int dealer: DealerRule split: SplitRule payout: Tuple[int, int] class", "with path.open(\"r\") as results: rdr = csv.reader(results) outcomes = (float(row[10]) for row in", "0.943 <= outcome self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path: Path)", "record_win(self) -> None: pass def record_loss(self) -> None: pass class Flat(BettingStrategy): pass class", "<= outcome < 0.883: self.player.stake += 1 elif 0.883 <= outcome < 0.943:", "the above mock objects. import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table", "0.579: self.player.stake -= 1 elif 0.579 <= outcome < 0.883: self.player.stake += 1", "1 -- simulation model. \"\"\" from dataclasses import dataclass, astuple, asdict, field from", "= self.max_rounds self.stake = self.init_stake # A mock simulation which is built from", "result file can be read. :param path: Path to the example output \"\"\"", "BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No", "self.player.reset() while self.player.stake > 0 and self.player.rounds > 0: self.player.rounds -= 1 outcome", "is built from the above mock objects. 
import random @dataclass class Simulate: \"\"\"Mock", "rounds: int = field(init=False) stake: float = field(init=False) def __post_init__(self): self.reset() def reset(self)", "class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass", "value in outcomes: sum_0 += 1 # value**0 sum_1 += value # value**1", "bet method\") def record_win(self) -> None: pass def record_loss(self) -> None: pass class", "/ sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = { 1 - mean", "else: # 0.943 <= outcome self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player) def", "17\"\"\" pass class SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic", "mostly mocks. class DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits", "for count in range(self.samples): self.player.reset() while self.player.stake > 0 and self.player.rounds > 0:", "-> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands", "self.player.stake -= 1 elif 0.579 <= outcome < 0.883: self.player.stake += 1 elif", "-> None: \"\"\" Validate unit test result file can be read. :param path:", "for value in outcomes: sum_0 += 1 # value**0 sum_1 += value #", "outcomes: sum_0 += 1 # value**0 sum_1 += value # value**1 value_min =", "value_max = first for value in outcomes: sum_0 += 1 # value**0 sum_1", "class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy betting: BettingStrategy", "built from the above mock objects. 
import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\"", "def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class", "\"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout blackjack_payout = x / y for", "# ===================== # A set of class hierarchies that we'll use for several", "f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No bet method\") def record_win(self) -> None:", "be read. :param path: Path to the example output \"\"\" with path.open(\"r\") as", "soft 17\"\"\" pass class SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule):", "max(value_max, value) mean = sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge", "simulation model. \"\"\" from dataclasses import dataclass, astuple, asdict, field from typing import", "pass @dataclass class Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int rounds:", "= sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = { 1", "x / y for count in range(self.samples): self.player.reset() while self.player.stake > 0 and", "sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = { 1 - mean /", "elif 0.883 <= outcome < 0.943: # a \"push\" pass else: # 0.943", "value) value_max = max(value_max, value) mean = sum_1 / sum_0 print( f\"{path}\\nMean =", "1 elif 0.883 <= outcome < 0.943: # a \"push\" pass else: #", "and self.player.rounds > 0: self.player.rounds -= 1 outcome = random.random() if outcome <", "on soft 17\"\"\" pass class SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class", "Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout blackjack_payout = x / y", "= value_max = first for value in outcomes: sum_0 += 1 # value**0", "pass @dataclass class Table: decks: int limit: int 
dealer: DealerRule split: SplitRule payout:", "< 0.579: self.player.stake -= 1 elif 0.579 <= outcome < 0.883: self.player.stake +=", "class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake:", "= next(outcomes) sum_0, sum_1 = 1, first value_min = value_max = first for", "Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy", "def reset(self) -> None: self.rounds = self.max_rounds self.stake = self.init_stake # A mock", "pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int", "class NoReSplitAces(SplitRule): \"\"\"One card only to aces; no resplit.\"\"\" pass @dataclass class Table:", "# A mock simulation which is built from the above mock objects. import", "\"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass class SplitRule:", "import dataclass, astuple, asdict, field from typing import Tuple, Iterator from pathlib import", "{mean:.1f}\\n\" f\"House Edge = { 1 - mean / 50:.1%}\\n\" f\"Range = {value_min:.1f}", "0.883: self.player.stake += 1 elif 0.883 <= outcome < 0.943: # a \"push\"", "value_min = min(value_min, value) value_max = max(value_max, value) mean = sum_1 / sum_0", "self.max_rounds self.stake = self.init_stake # A mock simulation which is built from the", "y for count in range(self.samples): self.player.reset() while self.player.stake > 0 and self.player.rounds >", "f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass", "soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass class SplitRule: def", "card only to aces; no resplit.\"\"\" pass @dataclass class Table: decks: int limit:", "for several 
examples. # The content is mostly mocks. class DealerRule: def __repr__(self)", "from dataclasses import dataclass, astuple, asdict, field from typing import Tuple, Iterator from", "\"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One", "set of class hierarchies that we'll use for several examples. # The content", "str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self)", "no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card only to aces; no resplit.\"\"\" pass", "pass class SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit", "player: Player samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y", "examples. # The content is mostly mocks. 
class DealerRule: def __repr__(self) -> str:", "import Path import csv # Mock Object Model # ===================== # A set", "def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No bet", "1 # value**0 sum_1 += value # value**1 value_min = min(value_min, value) value_max", "dealer: DealerRule split: SplitRule payout: Tuple[int, int] class PlayerStrategy: def __repr__(self) -> str:", "field from typing import Tuple, Iterator from pathlib import Path import csv #", "outcome < 0.883: self.player.stake += 1 elif 0.883 <= outcome < 0.943: #", "NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card only to aces; no", "\"\"\"Mock simulation.\"\"\" table: Table player: Player samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield", "class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play:", "self.reset() def reset(self) -> None: self.rounds = self.max_rounds self.stake = self.init_stake # A", "Edge = { 1 - mean / 50:.1%}\\n\" f\"Range = {value_min:.1f} {value_max:.1f}\" )", "f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) -> str:", "Model # ===================== # A set of class hierarchies that we'll use for", "0.579 <= outcome < 0.883: self.player.stake += 1 elif 0.883 <= outcome <", "= csv.reader(results) outcomes = (float(row[10]) for row in rdr) first = next(outcomes) sum_0,", "DealerRule split: SplitRule payout: Tuple[int, int] class PlayerStrategy: def __repr__(self) -> str: return", "> 0 and self.player.rounds > 0: self.player.rounds -= 1 outcome = random.random() if", "python3.7 \"\"\" Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python 2nd", "mock simulation which is built from the above mock objects. 
import random @dataclass", "table: Table player: Player samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\"", "float = field(init=False) def __post_init__(self): self.reset() def reset(self) -> None: self.rounds = self.max_rounds", "that we'll use for several examples. # The content is mostly mocks. class", "count in range(self.samples): self.player.reset() while self.player.stake > 0 and self.player.rounds > 0: self.player.rounds", "A set of class hierarchies that we'll use for several examples. # The", "self.init_stake # A mock simulation which is built from the above mock objects.", "Tuple[int, int] class PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass", "DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass", "results: rdr = csv.reader(results) outcomes = (float(row[10]) for row in rdr) first =", "samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout", "Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter 14.", "random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player samples: int def", "= {mean:.1f}\\n\" f\"House Edge = { 1 - mean / 50:.1%}\\n\" f\"Range =", "= x / y for count in range(self.samples): self.player.reset() while self.player.stake > 0", "int = field(init=False) stake: float = field(init=False) def __post_init__(self): self.reset() def reset(self) ->", "14. Example 1 -- simulation model. 
\"\"\" from dataclasses import dataclass, astuple, asdict,", "pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy betting:", "first = next(outcomes) sum_0, sum_1 = 1, first value_min = value_max = first", "# value**0 sum_1 += value # value**1 value_min = min(value_min, value) value_max =", "< 0.943: # a \"push\" pass else: # 0.943 <= outcome self.player.stake +=", "class hierarchies that we'll use for several examples. # The content is mostly", "0.883 <= outcome < 0.943: # a \"push\" pass else: # 0.943 <=", "Example 1 -- simulation model. \"\"\" from dataclasses import dataclass, astuple, asdict, field", "outcomes = (float(row[10]) for row in rdr) first = next(outcomes) sum_0, sum_1 =", "-> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout blackjack_payout = x /", "path.open(\"r\") as results: rdr = csv.reader(results) outcomes = (float(row[10]) for row in rdr)", "@dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player samples: int def __iter__(self)", "-> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def", "__repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule):", "int dealer: DealerRule split: SplitRule payout: Tuple[int, int] class PlayerStrategy: def __repr__(self) ->", "self.player.stake > 0 and self.player.rounds > 0: self.player.rounds -= 1 outcome = random.random()", "astuple(self.player) def check(path: Path) -> None: \"\"\" Validate unit test result file can", "return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\"", "pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass 
class SplitRule: def __repr__(self) ->", "blackjack_payout = x / y for count in range(self.samples): self.player.reset() while self.player.stake >", "class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player samples: int def __iter__(self) ->", "sum_1 = 1, first value_min = value_max = first for value in outcomes:", "class DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\"", "pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class Player:", "class Table: decks: int limit: int dealer: DealerRule split: SplitRule payout: Tuple[int, int]", "blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path: Path) -> None: \"\"\" Validate unit", "OneThreeTwoSix(BettingStrategy): pass @dataclass class Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int", "class PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy):", "pass else: # 0.943 <= outcome self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player)", "Player samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y =", "statistical samples.\"\"\" x, y = self.table.payout blackjack_payout = x / y for count", "int rounds: int = field(init=False) stake: float = field(init=False) def __post_init__(self): self.reset() def", "None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass class", "str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no", "class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class 
NoReSplitAces(SplitRule): \"\"\"One card only to aces;", "Path) -> None: \"\"\" Validate unit test result file can be read. :param", "\"\"\" from dataclasses import dataclass, astuple, asdict, field from typing import Tuple, Iterator", "<reponame>pauldevos/Mastering-Object-Oriented-Python-Second-Edition #!/usr/bin/env python3.7 \"\"\" Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented", "\"\"\" with path.open(\"r\") as results: rdr = csv.reader(results) outcomes = (float(row[10]) for row", "value_max = max(value_max, value) mean = sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\"", "resplit.\"\"\" pass @dataclass class Table: decks: int limit: int dealer: DealerRule split: SplitRule", "-- simulation model. \"\"\" from dataclasses import dataclass, astuple, asdict, field from typing", "Mock Object Model # ===================== # A set of class hierarchies that we'll", "\"\"\" Validate unit test result file can be read. :param path: Path to", "0.943: # a \"push\" pass else: # 0.943 <= outcome self.player.stake += blackjack_payout", "# a \"push\" pass else: # 0.943 <= outcome self.player.stake += blackjack_payout yield", "= field(init=False) stake: float = field(init=False) def __post_init__(self): self.reset() def reset(self) -> None:", "yield astuple(self.table) + astuple(self.player) def check(path: Path) -> None: \"\"\" Validate unit test", ":param path: Path to the example output \"\"\" with path.open(\"r\") as results: rdr", "2e Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter 14. 
Example 1", "-> str: return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No bet method\") def", "+ astuple(self.player) def check(path: Path) -> None: \"\"\" Validate unit test result file", "sum_0, sum_1 = 1, first value_min = value_max = first for value in", "import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player samples: int", "astuple(self.table) + astuple(self.player) def check(path: Path) -> None: \"\"\" Validate unit test result", "<= outcome self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path: Path) ->", "# The content is mostly mocks. class DealerRule: def __repr__(self) -> str: return", "content is mostly mocks. class DealerRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class", "which is built from the above mock objects. import random @dataclass class Simulate:", "+= 1 # value**0 sum_1 += value # value**1 value_min = min(value_min, value)", "unit test result file can be read. 
:param path: Path to the example", "NotImplementedError(\"No bet method\") def record_win(self) -> None: pass def record_loss(self) -> None: pass", "= first for value in outcomes: sum_0 += 1 # value**0 sum_1 +=", "Path import csv # Mock Object Model # ===================== # A set of", "int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x, y = self.table.payout blackjack_payout", "\"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card only to aces; no resplit.\"\"\"", "next(outcomes) sum_0, sum_1 = 1, first value_min = value_max = first for value", "import Tuple, Iterator from pathlib import Path import csv # Mock Object Model", "-> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic", "outcome < 0.943: # a \"push\" pass else: # 0.943 <= outcome self.player.stake", "= field(init=False) def __post_init__(self): self.reset() def reset(self) -> None: self.rounds = self.max_rounds self.stake", "in rdr) first = next(outcomes) sum_0, sum_1 = 1, first value_min = value_max", "Examples for Mastering Object-Oriented Python 2nd Edition Chapter 14. 
Example 1 -- simulation", "return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft", "to the example output \"\"\" with path.open(\"r\") as results: rdr = csv.reader(results) outcomes", "class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class", "#!/usr/bin/env python3.7 \"\"\" Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python", "PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass", "self.player.stake += 1 elif 0.883 <= outcome < 0.943: # a \"push\" pass", "def record_win(self) -> None: pass def record_loss(self) -> None: pass class Flat(BettingStrategy): pass", "\"push\" pass else: # 0.943 <= outcome self.player.stake += blackjack_payout yield astuple(self.table) +", "dataclass, astuple, asdict, field from typing import Tuple, Iterator from pathlib import Path", "__repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No bet method\")", "class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) -> str: return", "\"\"\"Stands on soft 17\"\"\" pass class SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\"", "-> int: raise NotImplementedError(\"No bet method\") def record_win(self) -> None: pass def record_loss(self)", "aces; no resplit.\"\"\" pass @dataclass class Table: decks: int limit: int dealer: DealerRule", "> 0: self.player.rounds -= 1 outcome = random.random() if outcome < 0.579: self.player.stake", "we'll use for several examples. # The content is mostly mocks. 
class DealerRule:", "first for value in outcomes: sum_0 += 1 # value**0 sum_1 += value", "in range(self.samples): self.player.reset() while self.player.stake > 0 and self.player.rounds > 0: self.player.rounds -=", "class BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise", "-= 1 elif 0.579 <= outcome < 0.883: self.player.stake += 1 elif 0.883", "Tuple, Iterator from pathlib import Path import csv # Mock Object Model #", "import csv # Mock Object Model # ===================== # A set of class", "f\"House Edge = { 1 - mean / 50:.1%}\\n\" f\"Range = {value_min:.1f} {value_max:.1f}\"", "int init_stake: int rounds: int = field(init=False) stake: float = field(init=False) def __post_init__(self):", "Mastering Object-Oriented Python 2e Code Examples for Mastering Object-Oriented Python 2nd Edition Chapter", "+= 1 elif 0.883 <= outcome < 0.943: # a \"push\" pass else:", "path: Path to the example output \"\"\" with path.open(\"r\") as results: rdr =", "= 1, first value_min = value_max = first for value in outcomes: sum_0", "Object-Oriented Python 2nd Edition Chapter 14. Example 1 -- simulation model. \"\"\" from", "Path to the example output \"\"\" with path.open(\"r\") as results: rdr = csv.reader(results)", "f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge = { 1 - mean / 50:.1%}\\n\" f\"Range", "limit: int dealer: DealerRule split: SplitRule payout: Tuple[int, int] class PlayerStrategy: def __repr__(self)", "example output \"\"\" with path.open(\"r\") as results: rdr = csv.reader(results) outcomes = (float(row[10])", "csv.reader(results) outcomes = (float(row[10]) for row in rdr) first = next(outcomes) sum_0, sum_1", "Python 2nd Edition Chapter 14. Example 1 -- simulation model. \"\"\" from dataclasses", "model. 
\"\"\" from dataclasses import dataclass, astuple, asdict, field from typing import Tuple,", "Table player: Player samples: int def __iter__(self) -> Iterator[Tuple]: \"\"\"Yield statistical samples.\"\"\" x,", "# A set of class hierarchies that we'll use for several examples. #", "above mock objects. import random @dataclass class Simulate: \"\"\"Mock simulation.\"\"\" table: Table player:", "return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) ->", "simulation which is built from the above mock objects. import random @dataclass class", "f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\"", "str: return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No bet method\") def record_win(self)", "return f\"{self.__class__.__name__}()\" def bet(self) -> int: raise NotImplementedError(\"No bet method\") def record_win(self) ->", "-> None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass @dataclass", "read. 
:param path: Path to the example output \"\"\" with path.open(\"r\") as results:", "17\"\"\" pass class Stand17(DealerRule): \"\"\"Stands on soft 17\"\"\" pass class SplitRule: def __repr__(self)", "self.player.rounds > 0: self.player.rounds -= 1 outcome = random.random() if outcome < 0.579:", "elif 0.579 <= outcome < 0.883: self.player.stake += 1 elif 0.883 <= outcome", "anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule): \"\"\"One card only", "rdr) first = next(outcomes) sum_0, sum_1 = 1, first value_min = value_max =", "record_loss(self) -> None: pass class Flat(BettingStrategy): pass class Martingale(BettingStrategy): pass class OneThreeTwoSix(BettingStrategy): pass", "Player: play: PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int rounds: int = field(init=False)", "check(path: Path) -> None: \"\"\" Validate unit test result file can be read.", "__repr__(self) -> str: return f\"{self.__class__.__name__}()\" class SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy:", "dataclasses import dataclass, astuple, asdict, field from typing import Tuple, Iterator from pathlib", "use for several examples. # The content is mostly mocks. class DealerRule: def", "SomeStrategy(PlayerStrategy): pass class AnotherStrategy(PlayerStrategy): pass class BettingStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\"", "2nd Edition Chapter 14. Example 1 -- simulation model. 
\"\"\" from dataclasses import", "outcome < 0.579: self.player.stake -= 1 elif 0.579 <= outcome < 0.883: self.player.stake", "SplitRule payout: Tuple[int, int] class PlayerStrategy: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class", "__repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class Stand17(DealerRule):", "def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class Hit17(DealerRule): \"\"\"Hits soft 17\"\"\" pass class", "class SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\"", "Mastering Object-Oriented Python 2nd Edition Chapter 14. Example 1 -- simulation model. \"\"\"", "self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path: Path) -> None: \"\"\"", "test result file can be read. :param path: Path to the example output", "while self.player.stake > 0 and self.player.rounds > 0: self.player.rounds -= 1 outcome =", "(float(row[10]) for row in rdr) first = next(outcomes) sum_0, sum_1 = 1, first", "1 outcome = random.random() if outcome < 0.579: self.player.stake -= 1 elif 0.579", "< 0.883: self.player.stake += 1 elif 0.883 <= outcome < 0.943: # a", "row in rdr) first = next(outcomes) sum_0, sum_1 = 1, first value_min =", "/ y for count in range(self.samples): self.player.reset() while self.player.stake > 0 and self.player.rounds", "Chapter 14. Example 1 -- simulation model. 
\"\"\" from dataclasses import dataclass, astuple,", "ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass class NoReSplit(SplitRule): \"\"\"Simplistic no resplit.\"\"\" pass class NoReSplitAces(SplitRule):", "Simulate: \"\"\"Mock simulation.\"\"\" table: Table player: Player samples: int def __iter__(self) -> Iterator[Tuple]:", "SplitRule: def __repr__(self) -> str: return f\"{self.__class__.__name__}()\" class ReSplit(SplitRule): \"\"\"Simplistic resplit anything.\"\"\" pass", "from typing import Tuple, Iterator from pathlib import Path import csv # Mock", "def check(path: Path) -> None: \"\"\" Validate unit test result file can be", "1 elif 0.579 <= outcome < 0.883: self.player.stake += 1 elif 0.883 <=", "-= 1 outcome = random.random() if outcome < 0.579: self.player.stake -= 1 elif", "samples.\"\"\" x, y = self.table.payout blackjack_payout = x / y for count in", "value) mean = sum_1 / sum_0 print( f\"{path}\\nMean = {mean:.1f}\\n\" f\"House Edge =", "PlayerStrategy betting: BettingStrategy max_rounds: int init_stake: int rounds: int = field(init=False) stake: float", "of class hierarchies that we'll use for several examples. # The content is", "===================== # A set of class hierarchies that we'll use for several examples.", "1, first value_min = value_max = first for value in outcomes: sum_0 +=", "sum_0 += 1 # value**0 sum_1 += value # value**1 value_min = min(value_min,", "# 0.943 <= outcome self.player.stake += blackjack_payout yield astuple(self.table) + astuple(self.player) def check(path:", "int limit: int dealer: DealerRule split: SplitRule payout: Tuple[int, int] class PlayerStrategy: def", "__post_init__(self): self.reset() def reset(self) -> None: self.rounds = self.max_rounds self.stake = self.init_stake #" ]
[ "os resultados em uma lista results = [] for roll_num in range(100): result", "for value in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza os resultados", "die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza os resultados hist = pygal.Bar() hist.title", "[1, 2, 3, 4, 5, 6] hist.x_title = \"Results\" hist.y_title = \"Frequency of", "hist = pygal.Bar() hist.title = \"Results of rolling one D6 1000 times.\" hist.x_labels", "import Die # Cria um D6 die = Die() # Faz alguns lancamentos", "results.append(result) # Analisa os resultados frequencies = [] for value in range(1, die.num_sides+1):", "[] for value in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza os", "in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza os resultados hist =", "Visualiza os resultados hist = pygal.Bar() hist.title = \"Results of rolling one D6", "pygal.Bar() hist.title = \"Results of rolling one D6 1000 times.\" hist.x_labels = [1,", "<reponame>SweydAbdul/estudos-python<gh_stars>0 import pygal from CursoIntensivoPython.Aula15_visualizacao_de_dados.die import Die # Cria um D6 die =", "D6 1000 times.\" hist.x_labels = [1, 2, 3, 4, 5, 6] hist.x_title =", "1000 times.\" hist.x_labels = [1, 2, 3, 4, 5, 6] hist.x_title = \"Results\"", "rolling one D6 1000 times.\" hist.x_labels = [1, 2, 3, 4, 5, 6]", "2, 3, 4, 5, 6] hist.x_title = \"Results\" hist.y_title = \"Frequency of result\"", "5, 6] hist.x_title = \"Results\" hist.y_title = \"Frequency of result\" hist.add('D6', frequencies) hist.render_to_file('die_visual.svg')", "Die() # Faz alguns lancamentos e armazena os resultados em uma lista results", "D6 die = Die() # Faz alguns lancamentos e armazena os resultados em", "import pygal from CursoIntensivoPython.Aula15_visualizacao_de_dados.die import Die # Cria um D6 die = Die()", "results = [] for 
roll_num in range(100): result = die.roll() results.append(result) # Analisa", "range(100): result = die.roll() results.append(result) # Analisa os resultados frequencies = [] for", "= die.roll() results.append(result) # Analisa os resultados frequencies = [] for value in", "3, 4, 5, 6] hist.x_title = \"Results\" hist.y_title = \"Frequency of result\" hist.add('D6',", "armazena os resultados em uma lista results = [] for roll_num in range(100):", "em uma lista results = [] for roll_num in range(100): result = die.roll()", "= [] for value in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza", "= Die() # Faz alguns lancamentos e armazena os resultados em uma lista", "hist.title = \"Results of rolling one D6 1000 times.\" hist.x_labels = [1, 2,", "alguns lancamentos e armazena os resultados em uma lista results = [] for", "\"Results of rolling one D6 1000 times.\" hist.x_labels = [1, 2, 3, 4,", "uma lista results = [] for roll_num in range(100): result = die.roll() results.append(result)", "results.count(value) frequencies.append(frequency) # Visualiza os resultados hist = pygal.Bar() hist.title = \"Results of", "CursoIntensivoPython.Aula15_visualizacao_de_dados.die import Die # Cria um D6 die = Die() # Faz alguns", "lancamentos e armazena os resultados em uma lista results = [] for roll_num", "frequencies.append(frequency) # Visualiza os resultados hist = pygal.Bar() hist.title = \"Results of rolling", "hist.x_labels = [1, 2, 3, 4, 5, 6] hist.x_title = \"Results\" hist.y_title =", "frequency = results.count(value) frequencies.append(frequency) # Visualiza os resultados hist = pygal.Bar() hist.title =", "Analisa os resultados frequencies = [] for value in range(1, die.num_sides+1): frequency =", "in range(100): result = die.roll() results.append(result) # Analisa os resultados frequencies = []", "roll_num in range(100): result = die.roll() results.append(result) # Analisa os resultados frequencies =", "# Analisa os 
resultados frequencies = [] for value in range(1, die.num_sides+1): frequency", "e armazena os resultados em uma lista results = [] for roll_num in", "frequencies = [] for value in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) #", "= results.count(value) frequencies.append(frequency) # Visualiza os resultados hist = pygal.Bar() hist.title = \"Results", "times.\" hist.x_labels = [1, 2, 3, 4, 5, 6] hist.x_title = \"Results\" hist.y_title", "of rolling one D6 1000 times.\" hist.x_labels = [1, 2, 3, 4, 5,", "Faz alguns lancamentos e armazena os resultados em uma lista results = []", "os resultados frequencies = [] for value in range(1, die.num_sides+1): frequency = results.count(value)", "range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza os resultados hist = pygal.Bar()", "resultados hist = pygal.Bar() hist.title = \"Results of rolling one D6 1000 times.\"", "4, 5, 6] hist.x_title = \"Results\" hist.y_title = \"Frequency of result\" hist.add('D6', frequencies)", "one D6 1000 times.\" hist.x_labels = [1, 2, 3, 4, 5, 6] hist.x_title", "pygal from CursoIntensivoPython.Aula15_visualizacao_de_dados.die import Die # Cria um D6 die = Die() #", "resultados frequencies = [] for value in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency)", "= [1, 2, 3, 4, 5, 6] hist.x_title = \"Results\" hist.y_title = \"Frequency", "die.roll() results.append(result) # Analisa os resultados frequencies = [] for value in range(1,", "lista results = [] for roll_num in range(100): result = die.roll() results.append(result) #", "# Visualiza os resultados hist = pygal.Bar() hist.title = \"Results of rolling one", "value in range(1, die.num_sides+1): frequency = results.count(value) frequencies.append(frequency) # Visualiza os resultados hist", "Die # Cria um D6 die = Die() # Faz alguns lancamentos e", "die = Die() # Faz alguns lancamentos e armazena os resultados em 
uma", "os resultados hist = pygal.Bar() hist.title = \"Results of rolling one D6 1000", "# Cria um D6 die = Die() # Faz alguns lancamentos e armazena", "# Faz alguns lancamentos e armazena os resultados em uma lista results =", "[] for roll_num in range(100): result = die.roll() results.append(result) # Analisa os resultados", "Cria um D6 die = Die() # Faz alguns lancamentos e armazena os", "for roll_num in range(100): result = die.roll() results.append(result) # Analisa os resultados frequencies", "result = die.roll() results.append(result) # Analisa os resultados frequencies = [] for value", "= [] for roll_num in range(100): result = die.roll() results.append(result) # Analisa os", "= pygal.Bar() hist.title = \"Results of rolling one D6 1000 times.\" hist.x_labels =", "= \"Results of rolling one D6 1000 times.\" hist.x_labels = [1, 2, 3,", "from CursoIntensivoPython.Aula15_visualizacao_de_dados.die import Die # Cria um D6 die = Die() # Faz", "um D6 die = Die() # Faz alguns lancamentos e armazena os resultados", "resultados em uma lista results = [] for roll_num in range(100): result =" ]
[ "= peso / (altura * altura) print('Seu imc é de {:.1f}'.format(imc)) if imc", "do peso!') elif imc <= 25: print('Seu peso é ideal!') elif imc <=", "sua altura? (M) ')) imc = peso / (altura * altura) print('Seu imc", "peso = float(input('Qual é seu peso? (KG) ')) altura = float(input('Qual é sua", "(M) ')) imc = peso / (altura * altura) print('Seu imc é de", "imc <= 30: print('Você esta com sobrepeso!') elif imc <= 40: print('Você esta", "print('Você esta com sobrepeso!') elif imc <= 40: print('Você esta com obesidade, CUIDADO!!')", "= float(input('Qual é seu peso? (KG) ')) altura = float(input('Qual é sua altura?", "é sua altura? (M) ')) imc = peso / (altura * altura) print('Seu", "30: print('Você esta com sobrepeso!') elif imc <= 40: print('Você esta com obesidade,", "imc <= 25: print('Seu peso é ideal!') elif imc <= 30: print('Você esta", "* altura) print('Seu imc é de {:.1f}'.format(imc)) if imc <= 18.5: print('Você esta", "print('Seu imc é de {:.1f}'.format(imc)) if imc <= 18.5: print('Você esta abaixo do", "<= 30: print('Você esta com sobrepeso!') elif imc <= 40: print('Você esta com", "peso!') elif imc <= 25: print('Seu peso é ideal!') elif imc <= 30:", "<= 40: print('Você esta com obesidade, CUIDADO!!') else: print('Voce esta com obesidade mórbida,", "40: print('Você esta com obesidade, CUIDADO!!') else: print('Voce esta com obesidade mórbida, CUIDADO!!')", "peso é ideal!') elif imc <= 30: print('Você esta com sobrepeso!') elif imc", "float(input('Qual é seu peso? (KG) ')) altura = float(input('Qual é sua altura? (M)", "25: print('Seu peso é ideal!') elif imc <= 30: print('Você esta com sobrepeso!')", "imc <= 40: print('Você esta com obesidade, CUIDADO!!') else: print('Voce esta com obesidade", "(KG) ')) altura = float(input('Qual é sua altura? 
(M) ')) imc = peso", "é ideal!') elif imc <= 30: print('Você esta com sobrepeso!') elif imc <=", "imc <= 18.5: print('Você esta abaixo do peso!') elif imc <= 25: print('Seu", "<= 18.5: print('Você esta abaixo do peso!') elif imc <= 25: print('Seu peso", "{:.1f}'.format(imc)) if imc <= 18.5: print('Você esta abaixo do peso!') elif imc <=", "float(input('Qual é sua altura? (M) ')) imc = peso / (altura * altura)", "de {:.1f}'.format(imc)) if imc <= 18.5: print('Você esta abaixo do peso!') elif imc", "altura) print('Seu imc é de {:.1f}'.format(imc)) if imc <= 18.5: print('Você esta abaixo", "18.5: print('Você esta abaixo do peso!') elif imc <= 25: print('Seu peso é", "altura? (M) ')) imc = peso / (altura * altura) print('Seu imc é", "seu peso? (KG) ')) altura = float(input('Qual é sua altura? (M) ')) imc", "abaixo do peso!') elif imc <= 25: print('Seu peso é ideal!') elif imc", "ideal!') elif imc <= 30: print('Você esta com sobrepeso!') elif imc <= 40:", "sobrepeso!') elif imc <= 40: print('Você esta com obesidade, CUIDADO!!') else: print('Voce esta", "elif imc <= 30: print('Você esta com sobrepeso!') elif imc <= 40: print('Você", "')) altura = float(input('Qual é sua altura? (M) ')) imc = peso /", "print('Seu peso é ideal!') elif imc <= 30: print('Você esta com sobrepeso!') elif", "if imc <= 18.5: print('Você esta abaixo do peso!') elif imc <= 25:", "')) imc = peso / (altura * altura) print('Seu imc é de {:.1f}'.format(imc))", "= float(input('Qual é sua altura? (M) ')) imc = peso / (altura *", "é de {:.1f}'.format(imc)) if imc <= 18.5: print('Você esta abaixo do peso!') elif", "elif imc <= 25: print('Seu peso é ideal!') elif imc <= 30: print('Você", "(altura * altura) print('Seu imc é de {:.1f}'.format(imc)) if imc <= 18.5: print('Você", "altura = float(input('Qual é sua altura? (M) ')) imc = peso / (altura", "imc = peso / (altura * altura) print('Seu imc é de {:.1f}'.format(imc)) if", "peso? (KG) ')) altura = float(input('Qual é sua altura? 
(M) ')) imc =", "com sobrepeso!') elif imc <= 40: print('Você esta com obesidade, CUIDADO!!') else: print('Voce", "/ (altura * altura) print('Seu imc é de {:.1f}'.format(imc)) if imc <= 18.5:", "print('Você esta abaixo do peso!') elif imc <= 25: print('Seu peso é ideal!')", "elif imc <= 40: print('Você esta com obesidade, CUIDADO!!') else: print('Voce esta com", "esta abaixo do peso!') elif imc <= 25: print('Seu peso é ideal!') elif", "esta com sobrepeso!') elif imc <= 40: print('Você esta com obesidade, CUIDADO!!') else:", "imc é de {:.1f}'.format(imc)) if imc <= 18.5: print('Você esta abaixo do peso!')", "é seu peso? (KG) ')) altura = float(input('Qual é sua altura? (M) '))", "<= 25: print('Seu peso é ideal!') elif imc <= 30: print('Você esta com", "peso / (altura * altura) print('Seu imc é de {:.1f}'.format(imc)) if imc <=" ]
[ "gg: break elif x == ap: stack.append('$') elif x == nil: stack.append([]); reduce(stack)", "print('~>', repr(res)) return res def _render_frame(self, images): self.frame = images def eval_step(self, mouse):", "= MachineImage().decode_lists(image) assert rev == data, (rev, data) class Galaxy: def __init__(self, target='release',", "fn)) print(repr(str(fn))) self.galexy = ctypes.cdll.LoadLibrary(fn) p64 = ctypes.POINTER(ctypes.c_int64) u32 = ctypes.c_uint32 self.galexy.evaluate.argtypes =", "len(image))(*image) res = self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res) # print('<', repr(res)) return res", "data in test_cases: image = MachineImage().encode_lists(data) image += [gg] rev = MachineImage().decode_lists(image) assert", "TOKENS = dict(_Tokens) def emit_call(self, *args): ap, num, gg = map(self.TOKENS.__getitem__, 'ap number", "while (stack[-3] == '$') and (stack[-2] != '$'): head, tail = stack[-2], stack[-1]", "'darwin' else '.so') build_target = (target + '/') if target else '' fn", "x = data[i] i += 1 if x == gg: break elif x", "num, gg = map(self.TOKENS.__getitem__, 'ap number GG'.split()) def emit(fn, args): fringe = [(fn,", "[data] while fringe: item = fringe.pop() if isinstance(item, tuple) and (len(item) == 1):", "item = fringe.pop() if isinstance(item, tuple) and (len(item) == 1): fringe.append(item[0]) elif isinstance(item,", "stack[-1] if head == cons: xs = self._partial(tail) elif isinstance(head, self._partial): if isinstance(tail,", "= 'libgalaxy' + ('.dylib' if sys.platform == 'darwin' else '.so') build_target = (target", "11, 12], 3, (8, 9)], ] for data in test_cases: image = MachineImage().encode_lists(data)", "# print('<', (images)) self.state = new_state self._render_frame(images) if __name__ == '__main__': g =", "ctypes import sys from pathlib import Path from .space import SpaceClient _known_tokens =", "build_target + fn)) print(repr(str(fn))) self.galexy = ctypes.cdll.LoadLibrary(fn) p64 = 
ctypes.POINTER(ctypes.c_int64) u32 = ctypes.c_uint32", "fringe = [data] while fringe: item = fringe.pop() if isinstance(item, tuple) and (len(item)", "(head.arg, tail) else: raise Exception((head, tail)) stack[-3:] = [xs] stack = ['$', '$']", "MachineImage().decode_lists(image) assert rev == data, (rev, data) class Galaxy: def __init__(self, target='release', api_host=None,", "= self._partial(tail) elif isinstance(head, self._partial): if isinstance(tail, list): xs = [head.arg, *tail] elif", "*tail) else: xs = (head.arg, tail) else: raise Exception((head, tail)) stack[-3:] = [xs]", "import Path from .space import SpaceClient _known_tokens = 'ap cons nil neg c", "ap, num, gg = map(self.TOKENS.__getitem__, 'ap number GG'.split()) def emit(fn, args): fringe =", "0): yield self.TOKENS[fn] else: yield ap fringe.append((None, args[-1])) fringe.append((fn, args[:-1])) return list(emit(args[0], args[1:]))", "= map(self.TOKENS.__getitem__, 'ap number GG'.split()) def emit(fn, args): fringe = [(fn, args)] while", "self.galexy = ctypes.cdll.LoadLibrary(fn) p64 = ctypes.POINTER(ctypes.c_int64) u32 = ctypes.c_uint32 self.galexy.evaluate.argtypes = (u32, p64)", "elif isinstance(head, self._partial): if isinstance(tail, list): xs = [head.arg, *tail] elif isinstance(tail, tuple):", "(u32, p64) self.galexy.evaluate.restype = p64 self.galexy.load_machine.argtypes = (p64,) self.galexy.load_machine.restype = None self.space =", "yield ap yield ap yield cons fringe.append(item[1:]) fringe.append(item[0]) else: yield num yield int(item)", "= (u32, p64) self.galexy.evaluate.restype = p64 self.galexy.load_machine.argtypes = (p64,) self.galexy.load_machine.restype = None self.space", "res def _render_frame(self, images): self.frame = images def eval_step(self, mouse): print('>', (self.state)) print('>',", "elif isinstance(tail, tuple): xs = (head.arg, *tail) else: xs = (head.arg, tail) else:", "ctypes.cdll.LoadLibrary(fn) p64 = ctypes.POINTER(ctypes.c_int64) u32 = ctypes.c_uint32 
self.galexy.evaluate.argtypes = (u32, p64) self.galexy.evaluate.restype =", "event) data = (ctypes.c_int64 * len(image))(*image) res = self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res)", "s isnil car eq mul add lt div i t f cdr SCAN", "reduce(stack) elif x == num: stack.append(data[i]); i += 1; reduce(stack) else: stack.append(x) return", "class Galaxy: def __init__(self, target='release', api_host=None, api_key=None): self.state = [] fn = 'libgalaxy'", "self.galexy.load_machine(None) image = MachineImage().emit_call('galaxy', state, event) data = (ctypes.c_int64 * len(image))(*image) res =", "def eval_step(self, mouse): print('>', (self.state)) print('>', (mouse or (0, 0))) (new_state, images) =", "MachineImage().decode_lists(res) # print('<', repr(res)) return res def _send_to_alien(self, data): print('<~', repr(data)) res =", "if sys.platform == 'darwin' else '.so') build_target = (target + '/') if target", "break elif x == ap: stack.append('$') elif x == nil: stack.append([]); reduce(stack) elif", "image = MachineImage().emit_call('galaxy', state, event) data = (ctypes.c_int64 * len(image))(*image) res = self.galexy.evaluate(len(image),", "return f'Partial({repr(self.arg)})' def decode_lists(self, data): ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap", "= (head.arg, *tail) else: xs = (head.arg, tail) else: raise Exception((head, tail)) stack[-3:]", "DEF galaxy GG' _Tokens = {s:i for i, s in enumerate(_known_tokens.split(), 1)} class", "args[-1])) fringe.append((fn, args[:-1])) return list(emit(args[0], args[1:])) def encode_lists(self, data): ap, cons, num, nil,", "0): return (state, data) return self._interact(state, self._send_to_alien(data)) def _evaluate(self, state, event): self.galexy.load_machine(None) image", "neg c b s isnil car eq mul add lt div i t", "= self.space.send(data) print('~>', repr(res)) return res def _render_frame(self, images): self.frame = images def", "args[:-1])) return list(emit(args[0], 
args[1:])) def encode_lists(self, data): ap, cons, num, nil, gg =", "else: yield num yield int(item) return list(encode(data)) class _partial: def __init__(self, arg): self.arg", "= ctypes.POINTER(ctypes.c_int64) u32 = ctypes.c_uint32 self.galexy.evaluate.argtypes = (u32, p64) self.galexy.evaluate.restype = p64 self.galexy.load_machine.argtypes", "= (target + '/') if target else '' fn = next(Path(__file__).parent.resolve().parent.glob('**/' + build_target", "None self.space = SpaceClient(api_host=api_host, api_key=api_key) def _interact(self, state, event): flag, state, data =", "number FUN DEF galaxy GG' _Tokens = {s:i for i, s in enumerate(_known_tokens.split(),", "isinstance(head, self._partial): if isinstance(tail, list): xs = [head.arg, *tail] elif isinstance(tail, tuple): xs", "ap yield ap yield cons fringe.append(item[1:]) fringe.append(item[0]) else: yield num yield int(item) return", "+ build_target + fn)) print(repr(str(fn))) self.galexy = ctypes.cdll.LoadLibrary(fn) p64 = ctypes.POINTER(ctypes.c_int64) u32 =", "None: yield from self.encode_lists(args) elif isinstance(args, (list, tuple)) and (len(args) == 0): yield", "res = self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res) # print('<', repr(res)) return res def", "fringe.pop() if isinstance(item, tuple) and (len(item) == 1): fringe.append(item[0]) elif isinstance(item, list) and", "== 0): return (state, data) return self._interact(state, self._send_to_alien(data)) def _evaluate(self, state, event): self.galexy.load_machine(None)", "fn is None: yield from self.encode_lists(args) elif isinstance(args, (list, tuple)) and (len(args) ==", "'$'] i = 0 while True: # print('** ', i, repr(stack), '--', repr(data[i]))", "SpaceClient(api_host=api_host, api_key=api_key) def _interact(self, state, event): flag, state, data = self._evaluate(state, event) if", "num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def reduce(stack): while", "SpaceClient _known_tokens = 
'ap cons nil neg c b s isnil car eq", "list(emit(args[0], args[1:])) def encode_lists(self, data): ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap", "int(item) return list(encode(data)) class _partial: def __init__(self, arg): self.arg = arg def __repr__(self):", "* len(image))(*image) res = self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res) # print('<', repr(res)) return", "nil neg c b s isnil car eq mul add lt div i", "i += 1; reduce(stack) else: stack.append(x) return stack[-1] def run_tests(self): gg, = map(self.TOKENS.__getitem__,", "fn = next(Path(__file__).parent.resolve().parent.glob('**/' + build_target + fn)) print(repr(str(fn))) self.galexy = ctypes.cdll.LoadLibrary(fn) p64 =", "[], [42], (2, 7), [(3, 1)], [[],[],[]], [0, [42, 11, 12], 3, (8,", "*args): ap, num, gg = map(self.TOKENS.__getitem__, 'ap number GG'.split()) def emit(fn, args): fringe", "state, event) data = (ctypes.c_int64 * len(image))(*image) res = self.galexy.evaluate(len(image), data) res =", "target='release', api_host=None, api_key=None): self.state = [] fn = 'libgalaxy' + ('.dylib' if sys.platform", "nil: stack.append([]); reduce(stack) elif x == num: stack.append(data[i]); i += 1; reduce(stack) else:", "encode(data): fringe = [data] while fringe: item = fringe.pop() if isinstance(item, tuple) and", "return list(emit(args[0], args[1:])) def encode_lists(self, data): ap, cons, num, nil, gg = map(self.TOKENS.__getitem__,", "(ctypes.c_int64 * len(image))(*image) res = self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res) # print('<', repr(res))", "state, event): flag, state, data = self._evaluate(state, event) if (flag == 0): return", "nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def reduce(stack): while (stack[-3]", "tail)) stack[-3:] = [xs] stack = ['$', '$'] i = 0 while True:", "nil GG'.split()) def reduce(stack): while (stack[-3] == '$') and (stack[-2] != '$'): head,", "map(self.TOKENS.__getitem__, 
'ap cons number nil GG'.split()) def reduce(stack): while (stack[-3] == '$') and", "else: stack.append(x) return stack[-1] def run_tests(self): gg, = map(self.TOKENS.__getitem__, 'GG'.split()) test_cases = [", "= arg def __repr__(self): return f'Partial({repr(self.arg)})' def decode_lists(self, data): ap, cons, num, nil,", "num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def encode(data): fringe", "self.encode_lists(args) elif isinstance(args, (list, tuple)) and (len(args) == 0): yield self.TOKENS[fn] else: yield", "stack.append('$') elif x == nil: stack.append([]); reduce(stack) elif x == num: stack.append(data[i]); i", "self.state = new_state self._render_frame(images) if __name__ == '__main__': g = Galaxy() r =", "== num: stack.append(data[i]); i += 1; reduce(stack) else: stack.append(x) return stack[-1] def run_tests(self):", "reduce(stack) else: stack.append(x) return stack[-1] def run_tests(self): gg, = map(self.TOKENS.__getitem__, 'GG'.split()) test_cases =", "= map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def reduce(stack): while (stack[-3] == '$')", "self._partial): if isinstance(tail, list): xs = [head.arg, *tail] elif isinstance(tail, tuple): xs =", "== ap: stack.append('$') elif x == nil: stack.append([]); reduce(stack) elif x == num:", "new_state self._render_frame(images) if __name__ == '__main__': g = Galaxy() r = g.eval_step((0,0)) print(repr(r))", "= {s:i for i, s in enumerate(_known_tokens.split(), 1)} class AlienProxy: def __init__(self): pass", "eq mul add lt div i t f cdr SCAN number FUN DEF", "yield ap yield cons fringe.append(item[1:]) fringe.append(item[0]) else: yield num yield int(item) return list(encode(data))", "images) = self._interact(self.state, mouse or (0, 0)) print('<', (new_state)) # print('<', (images)) self.state", "fn, args = fringe.pop() if fn is None: yield from self.encode_lists(args) elif isinstance(args,", "data): ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons 
number nil GG'.split())", "Exception((head, tail)) stack[-3:] = [xs] stack = ['$', '$'] i = 0 while", "elif isinstance(item, (list, tuple)): yield ap yield ap yield cons fringe.append(item[1:]) fringe.append(item[0]) else:", "repr(res)) return res def _render_frame(self, images): self.frame = images def eval_step(self, mouse): print('>',", "= self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res) # print('<', repr(res)) return res def _send_to_alien(self,", "i += 1 if x == gg: break elif x == ap: stack.append('$')", "stack.append([]); reduce(stack) elif x == num: stack.append(data[i]); i += 1; reduce(stack) else: stack.append(x)", "data = self._evaluate(state, event) if (flag == 0): return (state, data) return self._interact(state,", "= stack[-2], stack[-1] if head == cons: xs = self._partial(tail) elif isinstance(head, self._partial):", "for i, s in enumerate(_known_tokens.split(), 1)} class AlienProxy: def __init__(self): pass class MachineImage:", "images def eval_step(self, mouse): print('>', (self.state)) print('>', (mouse or (0, 0))) (new_state, images)", "api_host=None, api_key=None): self.state = [] fn = 'libgalaxy' + ('.dylib' if sys.platform ==", "(len(args) == 0): yield self.TOKENS[fn] else: yield ap fringe.append((None, args[-1])) fringe.append((fn, args[:-1])) return", "print('>', (self.state)) print('>', (mouse or (0, 0))) (new_state, images) = self._interact(self.state, mouse or", "(new_state, images) = self._interact(self.state, mouse or (0, 0)) print('<', (new_state)) # print('<', (images))", "or (0, 0)) print('<', (new_state)) # print('<', (images)) self.state = new_state self._render_frame(images) if", "cons number nil GG'.split()) def encode(data): fringe = [data] while fringe: item =", "repr(res)) return res def _send_to_alien(self, data): print('<~', repr(data)) res = self.space.send(data) print('~>', repr(res))", "= ctypes.c_uint32 self.galexy.evaluate.argtypes = (u32, p64) self.galexy.evaluate.restype = p64 
self.galexy.load_machine.argtypes = (p64,) self.galexy.load_machine.restype", "self.galexy.load_machine.restype = None self.space = SpaceClient(api_host=api_host, api_key=api_key) def _interact(self, state, event): flag, state,", "x == ap: stack.append('$') elif x == nil: stack.append([]); reduce(stack) elif x ==", "x == nil: stack.append([]); reduce(stack) elif x == num: stack.append(data[i]); i += 1;", "args)] while fringe: fn, args = fringe.pop() if fn is None: yield from", "(8, 9)], ] for data in test_cases: image = MachineImage().encode_lists(data) image += [gg]", "= MachineImage().decode_lists(res) # print('<', repr(res)) return res def _send_to_alien(self, data): print('<~', repr(data)) res", "gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def reduce(stack): while (stack[-3] ==", "= map(self.TOKENS.__getitem__, 'GG'.split()) test_cases = [ [], [42], (2, 7), [(3, 1)], [[],[],[]],", "ctypes.c_uint32 self.galexy.evaluate.argtypes = (u32, p64) self.galexy.evaluate.restype = p64 self.galexy.load_machine.argtypes = (p64,) self.galexy.load_machine.restype =", "(0, 0)) print('<', (new_state)) # print('<', (images)) self.state = new_state self._render_frame(images) if __name__", "mouse): print('>', (self.state)) print('>', (mouse or (0, 0))) (new_state, images) = self._interact(self.state, mouse", "1): fringe.append(item[0]) elif isinstance(item, list) and (len(item) == 0): yield nil elif isinstance(item,", "def emit_call(self, *args): ap, num, gg = map(self.TOKENS.__getitem__, 'ap number GG'.split()) def emit(fn,", "print('<~', repr(data)) res = self.space.send(data) print('~>', repr(res)) return res def _render_frame(self, images): self.frame", "cons fringe.append(item[1:]) fringe.append(item[0]) else: yield num yield int(item) return list(encode(data)) class _partial: def", "sys.platform == 'darwin' else '.so') build_target = (target + '/') if target else", "import sys from pathlib import Path from .space import SpaceClient _known_tokens = 
class MachineImage:
    """Encoder/decoder between Python lists/tuples and the flat integer token
    stream consumed by the native galaxy evaluator.

    Lists map to nil/cons chains, tuples to improper (dotted) cons chains,
    and integers to a `number` token followed by the literal value.  A
    trailing ``GG`` token terminates a stream.
    """

    # Snapshot of the module-level token table (name -> integer id).
    TOKENS = dict(_Tokens)

    def emit_call(self, *args):
        """Encode a curried call ``args[0](args[1], ...)`` as a token list.

        Each application is emitted in `ap ap f x y` style; data arguments
        are encoded via :meth:`encode_lists`.

        :param args: function token name followed by its arguments.
        :returns: flat list of integer tokens.
        """
        ap = self.TOKENS['ap']

        def emit(fn, args):
            fringe = [(fn, args)]
            while fringe:
                fn, args = fringe.pop()
                if fn is None:
                    # Leaf: a data argument, encoded as a list structure.
                    yield from self.encode_lists(args)
                elif isinstance(args, (list, tuple)) and (len(args) == 0):
                    # Fully applied: emit the function's own token.
                    yield self.TOKENS[fn]
                else:
                    # Curry one application: ap <fn applied to init> <last arg>.
                    yield ap
                    fringe.append((None, args[-1]))
                    fringe.append((fn, args[:-1]))

        return list(emit(args[0], args[1:]))

    def encode_lists(self, data):
        """Encode nested lists/tuples/ints into a flat token list.

        :param data: an int, list, or tuple (arbitrarily nested).
        :returns: flat list of integer tokens (without a trailing GG).
        """
        ap, cons, num, nil = map(self.TOKENS.__getitem__, 'ap cons number nil'.split())

        def encode(data):
            fringe = [data]
            while fringe:
                item = fringe.pop()
                if isinstance(item, tuple) and (len(item) == 1):
                    # 1-tuple is the tail of a dotted pair: unwrap to the value.
                    fringe.append(item[0])
                elif isinstance(item, list) and (len(item) == 0):
                    yield nil
                elif isinstance(item, (list, tuple)):
                    # (head . tail) encodes as `ap ap cons head tail`.
                    yield ap
                    yield ap
                    yield cons
                    fringe.append(item[1:])
                    fringe.append(item[0])
                else:
                    yield num
                    yield int(item)

        return list(encode(data))

    class _partial:
        """Marks a cons whose head is known but whose tail is still pending."""

        def __init__(self, arg):
            self.arg = arg

        def __repr__(self):
            return f'Partial({repr(self.arg)})'

    def decode_lists(self, data):
        """Decode a GG-terminated token stream back into lists/tuples/ints.

        Inverse of :meth:`encode_lists` (once the trailing GG is appended).

        :param data: indexable sequence of integer tokens.
        :returns: the decoded Python structure.
        :raises Exception: on a malformed application during reduction.
        """
        ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split())

        def reduce(stack):
            # Collapse completed applications at the top of the stack.
            # '$' marks a pending `ap` whose argument has not arrived yet.
            while (stack[-3] == '$') and (stack[-2] != '$'):
                head, tail = stack[-2], stack[-1]
                if head == cons:
                    xs = self._partial(tail)
                elif isinstance(head, self._partial):
                    if isinstance(tail, list):
                        xs = [head.arg, *tail]
                    elif isinstance(tail, tuple):
                        xs = (head.arg, *tail)
                    else:
                        xs = (head.arg, tail)
                else:
                    raise Exception((head, tail))
                stack[-3:] = [xs]

        stack = ['$', '$']
        i = 0
        while True:
            x = data[i]
            i += 1
            if x == gg:
                break
            elif x == ap:
                stack.append('$')
            elif x == nil:
                stack.append([]); reduce(stack)
            elif x == num:
                stack.append(data[i]); i += 1; reduce(stack)
            else:
                stack.append(x)
        return stack[-1]

    def run_tests(self):
        """Round-trip encode/decode over representative structures."""
        gg = self.TOKENS['GG']
        test_cases = [
            [],
            [42],
            (2, 7),
            [(3, 1)],
            [[], [], []],
            [0, [42, 11, 12], 3, (8, 9)],
        ]
        for data in test_cases:
            image = self.encode_lists(data)
            image += [gg]
            rev = self.decode_lists(image)
            assert rev == data, (rev, data)
class Galaxy:
    """Drives the galaxy protocol against a natively compiled evaluator.

    Locates the built ``libgalaxy`` shared library under the project tree,
    wires up its ctypes signatures, and runs the interact loop, forwarding
    data to the remote server via SpaceClient when the evaluator asks.
    """

    def __init__(self, target='release', api_host=None, api_key=None):
        self.state = []
        libname = 'libgalaxy' + ('.dylib' if sys.platform == 'darwin' else '.so')
        subdir = (target + '/') if target else ''
        # Search the repository tree for the built shared library.
        libpath = next(Path(__file__).parent.resolve().parent.glob('**/' + subdir + libname))
        print(repr(str(libpath)))
        self.galexy = ctypes.cdll.LoadLibrary(libpath)
        int64_ptr = ctypes.POINTER(ctypes.c_int64)
        uint32 = ctypes.c_uint32
        self.galexy.evaluate.argtypes = (uint32, int64_ptr)
        self.galexy.evaluate.restype = int64_ptr
        self.galexy.load_machine.argtypes = (int64_ptr,)
        self.galexy.load_machine.restype = None
        self.space = SpaceClient(api_host=api_host, api_key=api_key)

    def _interact(self, state, event):
        """Evaluate and exchange with the server until flag 0 is returned.

        :returns: ``(state, data)`` once the evaluator signals completion.
        """
        while True:
            flag, state, data = self._evaluate(state, event)
            if flag == 0:
                return (state, data)
            # Non-zero flag: ship `data` to the alien server and feed the
            # reply back in as the next event.
            event = self._send_to_alien(data)

    def _evaluate(self, state, event):
        """Run one ``galaxy(state, event)`` call inside the native machine."""
        self.galexy.load_machine(None)
        tokens = MachineImage().emit_call('galaxy', state, event)
        buf = (ctypes.c_int64 * len(tokens))(*tokens)
        raw = self.galexy.evaluate(len(tokens), buf)
        return MachineImage().decode_lists(raw)

    def _send_to_alien(self, data):
        """POST `data` to the alien server and return its decoded reply."""
        print('<~', repr(data))
        res = self.space.send(data)
        print('~>', repr(res))
        return res

    def _render_frame(self, images):
        # Stash the most recent image list for a UI layer to draw.
        self.frame = images

    def eval_step(self, mouse):
        """Advance the protocol one step for a click (defaults to (0, 0))."""
        click = mouse or (0, 0)
        print('>', (self.state))
        print('>', click)
        (new_state, images) = self._interact(self.state, click)
        print('<', (new_state))
        self.state = new_state
        self._render_frame(images)
"flag, state, data = self._evaluate(state, event) if (flag == 0): return (state, data)", "lt div i t f cdr SCAN number FUN DEF galaxy GG' _Tokens", "stack[-2], stack[-1] if head == cons: xs = self._partial(tail) elif isinstance(head, self._partial): if", "fringe: fn, args = fringe.pop() if fn is None: yield from self.encode_lists(args) elif", "cons number nil GG'.split()) def reduce(stack): while (stack[-3] == '$') and (stack[-2] !=", "ap yield cons fringe.append(item[1:]) fringe.append(item[0]) else: yield num yield int(item) return list(encode(data)) class", "SCAN number FUN DEF galaxy GG' _Tokens = {s:i for i, s in", "= map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def encode(data): fringe = [data] while", "print(repr(str(fn))) self.galexy = ctypes.cdll.LoadLibrary(fn) p64 = ctypes.POINTER(ctypes.c_int64) u32 = ctypes.c_uint32 self.galexy.evaluate.argtypes = (u32,", "== 1): fringe.append(item[0]) elif isinstance(item, list) and (len(item) == 0): yield nil elif", "[gg] rev = MachineImage().decode_lists(image) assert rev == data, (rev, data) class Galaxy: def", "'' fn = next(Path(__file__).parent.resolve().parent.glob('**/' + build_target + fn)) print(repr(str(fn))) self.galexy = ctypes.cdll.LoadLibrary(fn) p64", "api_key=api_key) def _interact(self, state, event): flag, state, data = self._evaluate(state, event) if (flag", "else: raise Exception((head, tail)) stack[-3:] = [xs] stack = ['$', '$'] i =", "__init__(self): pass class MachineImage: TOKENS = dict(_Tokens) def emit_call(self, *args): ap, num, gg", "ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split()) def", "= None self.space = SpaceClient(api_host=api_host, api_key=api_key) def _interact(self, state, event): flag, state, data", "(flag == 0): return (state, data) return self._interact(state, self._send_to_alien(data)) def _evaluate(self, state, event):", "fringe.append((fn, args[:-1])) return list(emit(args[0], args[1:])) def encode_lists(self, data): ap, 
cons, num, nil, gg", "res = self.space.send(data) print('~>', repr(res)) return res def _render_frame(self, images): self.frame = images", "class AlienProxy: def __init__(self): pass class MachineImage: TOKENS = dict(_Tokens) def emit_call(self, *args):", "if isinstance(tail, list): xs = [head.arg, *tail] elif isinstance(tail, tuple): xs = (head.arg,", "[0, [42, 11, 12], 3, (8, 9)], ] for data in test_cases: image", "= [] fn = 'libgalaxy' + ('.dylib' if sys.platform == 'darwin' else '.so')", "nil elif isinstance(item, (list, tuple)): yield ap yield ap yield cons fringe.append(item[1:]) fringe.append(item[0])", "= [head.arg, *tail] elif isinstance(tail, tuple): xs = (head.arg, *tail) else: xs =", "self._evaluate(state, event) if (flag == 0): return (state, data) return self._interact(state, self._send_to_alien(data)) def", "while fringe: item = fringe.pop() if isinstance(item, tuple) and (len(item) == 1): fringe.append(item[0])", "isinstance(args, (list, tuple)) and (len(args) == 0): yield self.TOKENS[fn] else: yield ap fringe.append((None,", "data = (ctypes.c_int64 * len(image))(*image) res = self.galexy.evaluate(len(image), data) res = MachineImage().decode_lists(res) #", "ap: stack.append('$') elif x == nil: stack.append([]); reduce(stack) elif x == num: stack.append(data[i]);", "yield self.TOKENS[fn] else: yield ap fringe.append((None, args[-1])) fringe.append((fn, args[:-1])) return list(emit(args[0], args[1:])) def", "eval_step(self, mouse): print('>', (self.state)) print('>', (mouse or (0, 0))) (new_state, images) = self._interact(self.state,", "'ap cons nil neg c b s isnil car eq mul add lt", "'$') and (stack[-2] != '$'): head, tail = stack[-2], stack[-1] if head ==", "isnil car eq mul add lt div i t f cdr SCAN number", "or (0, 0))) (new_state, images) = self._interact(self.state, mouse or (0, 0)) print('<', (new_state))", "= (head.arg, tail) else: raise Exception((head, tail)) stack[-3:] = [xs] stack = ['$',", "f cdr SCAN number FUN DEF galaxy GG' _Tokens = 
{s:i for i,", "= [data] while fringe: item = fringe.pop() if isinstance(item, tuple) and (len(item) ==", "def decode_lists(self, data): ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number", "f'Partial({repr(self.arg)})' def decode_lists(self, data): ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons", "cons: xs = self._partial(tail) elif isinstance(head, self._partial): if isinstance(tail, list): xs = [head.arg,", "'ap number GG'.split()) def emit(fn, args): fringe = [(fn, args)] while fringe: fn,", "fringe.append((None, args[-1])) fringe.append((fn, args[:-1])) return list(emit(args[0], args[1:])) def encode_lists(self, data): ap, cons, num,", ".space import SpaceClient _known_tokens = 'ap cons nil neg c b s isnil", "= 0 while True: # print('** ', i, repr(stack), '--', repr(data[i])) x =", "image += [gg] rev = MachineImage().decode_lists(image) assert rev == data, (rev, data) class", "return (state, data) return self._interact(state, self._send_to_alien(data)) def _evaluate(self, state, event): self.galexy.load_machine(None) image =" ]
[ "django import forms class RulesModelMultipleChoiceField(forms.ModelMultipleChoiceField): def label_from_instance(self, obj): return '%(rule_name)s' % {'rule_name': obj.name}", "from django import forms class RulesModelMultipleChoiceField(forms.ModelMultipleChoiceField): def label_from_instance(self, obj): return '%(rule_name)s' % {'rule_name':" ]
[ "print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs = self.config_params.get(\"epochs\") batch_size = self.config_params.get(\"batch_size\") print_and_log(f'Starting training with", "None: raise Exception(\"Evaluation input or output not passed properly to evaluate.\") batch_size =", "{fold_no} ...') training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index) # Fit data to model print_and_log(f'Training", "LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors. #", "2021 The scikit-learn developers. # 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. #", "{epochs}, batch size: {batch_size}') validation_data = None callbacks = None if training_set.has_validation(): print_and_log(\"Validation", "if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\", flush=True) return model,", "distribution. # # Carnegie Mellon® is registered in the U.S. Patent and Trademark", "VALIDATION\") kfold = KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation model evaluation acc_per_fold =", "UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN \"AS-IS\" BASIS. CARNEGIE", "KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED", "from utils.logging import print_and_log class ModelTrainer: \"\"\"Has functionalities to train a model\"\"\" def", "A Step Towards Realistic Drift Detection in Production MLSystems - Code # Copyright", "Realistic Drift Detection in Production MLSystems - Code # Copyright 2022 Carnegie Mellon", "how model is performing by selecting different sets to train/validate.\"\"\" # Define the", "This material has been approved for public release and unlimited distribution. 
Please see", "WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. # # Released", "PyData Development Team, and open source contributors. # 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021", "SciPy Developers. # 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers.", "import KFold import tensorflow.keras.callbacks as tfcb from utils.logging import print_and_log class ModelTrainer: \"\"\"Has", "if evaluation_input is None or evaluation_output is None: raise Exception(\"Evaluation input or output", "print(\"Done training!\", flush=True) return model, history def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None): \"\"\"Does an", "training_set.y_validation) print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; ' f'{model.metrics_names[1]} of {scores[1] *", "evaluation.\"\"\" print_and_log(\"EVALUATION\") print(\"Starting evaluation\", flush=True) if self.evaluation_input is not None: evaluation_input = self.evaluation_input", "fold_no = 1 for train_index, test_index in kfold.split(full_dataset.get_single_input(), full_dataset.get_output()): # Generate a print", "\"\"\"Splits a dataset and trains the configured model, returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance,", "train a model\"\"\" def __init__(self, model_module, config_params): self.model_module = model_module self.config_params = config_params", "Scipy developers, statsmodels Developers. # 7. 
matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.", "callbacks = self.get_callbacks(patience=5) history = model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training", "Lambda Foundry, Inc. and PyData Development Team, and open source contributors. # 3.", "es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave = tfcb.ModelCheckpoint(file_path, save_best_only=True) return [es, msave] def", "validation_data = (training_set.x_validation, training_set.y_validation) callbacks = self.get_callbacks(patience=5) history = model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data,", "scores = self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; '", "parameters: epochs: {epochs}, batch size: {batch_size}') validation_data = None callbacks = None if", "= \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave = tfcb.ModelCheckpoint(file_path, save_best_only=True) return [es,", "USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF", "acc: {scores}') return scores def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold cross-validation to check how", "model, returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, ' f'training samples:", "get_callbacks(patience=2): \"\"\"Gets helper callbacks to save checkpoints and allow early stopping when needed.\"\"\"", "EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO,", "see license.txt or contact <EMAIL> for full terms. 
# # [DISTRIBUTION STATEMENT A]", "Validator print_and_log(\"CROSS VALIDATION\") kfold = KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation model evaluation", "= None if training_set.has_validation(): print_and_log(\"Validation data found\") validation_data = (training_set.x_validation, training_set.y_validation) callbacks =", "early stopping when needed.\"\"\" file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave", "+ 1 print_and_log(\"Done with cross-validation!\") def split_and_train(self, dataset_instance): \"\"\"Splits a dataset and trains", "Store evaluation input/outputs as the validation split, in case evaluation is done later.", "FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. # # Released under a MIT", "in the U.S. Patent and Trademark Office by Carnegie Mellon University. # #", "self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; ' f'{model.metrics_names[1]} of", "loss_per_fold = [] fold_no = 1 for train_index, test_index in kfold.split(full_dataset.get_single_input(), full_dataset.get_output()): #", "(https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California. # 2. Pandas", "2022 Carnegie Mellon University. # # NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND", "registered in the U.S. Patent and Trademark Office by Carnegie Mellon University. 
#", "FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE", "samples: {len(training_set.x_validation[0])}') trained_model, history = self.train(training_set) # Store evaluation input/outputs as the validation", "f'training samples: {len(training_set.x_train[0])}, ' f'validation samples: {len(training_set.x_validation[0])}') trained_model, history = self.train(training_set) # Store", "returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, ' f'training samples: {len(training_set.x_train[0])},", "NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers. # 6. statsmodels", "{history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\", flush=True) return model, history def evaluate(self, trained_model,", "as tfcb from utils.logging import print_and_log class ModelTrainer: \"\"\"Has functionalities to train a", "scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! Evaluation loss and acc: {scores}') return scores", "({len(history.history.get(\"loss\"))} epochs): ' f'loss: {history.history.get(\"loss\")[-1]}, ' f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]},", "(https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers. # 6. 
statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy", "print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, ' f'training samples: {len(training_set.x_train[0])}, ' f'validation samples: {len(training_set.x_validation[0])}') trained_model, history", "WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING,", "print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}') scores = self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold {fold_no}:", "OR COPYRIGHT INFRINGEMENT. # # Released under a MIT (SEI)-style license, please see", "UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO", "\"\"\"Has functionalities to train a model\"\"\" def __init__(self, model_module, config_params): self.model_module = model_module", "None: evaluation_input = self.evaluation_input if self.evaluation_output is not None: evaluation_output = self.evaluation_output if", "= fold_no + 1 print_and_log(\"Done with cross-validation!\") def split_and_train(self, dataset_instance): \"\"\"Splits a dataset", "makes use of the following Third-Party Software subject to its own license: #", "self.model_module.create_model() epochs = self.config_params.get(\"epochs\") batch_size = self.config_params.get(\"batch_size\") print_and_log(f'Starting training with hyper parameters: epochs:", "print_and_log(\"EVALUATION\") print(\"Starting evaluation\", flush=True) if self.evaluation_input is not None: evaluation_input = self.evaluation_input if", "INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY,", "{dataset_instance.get_number_of_samples()}, ' f'training samples: {len(training_set.x_train[0])}, ' f'validation samples: {len(training_set.x_validation[0])}') trained_model, history = self.train(training_set)", "and distribution. 
# # Carnegie Mellon® is registered in the U.S. Patent and", "California. # 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry,", "scores def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold cross-validation to check how model is performing", "model, history = self.train(training_set) # Generate generalization metrics print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}') scores", "def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold cross-validation to check how model is performing by", "TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE", "validation split, in case evaluation is done later. self.evaluation_input = training_set.x_validation self.evaluation_output =", "Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers. # 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt)", "msave = tfcb.ModelCheckpoint(file_path, save_best_only=True) return [es, msave] def train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model", "functionalities to train a model\"\"\" def __init__(self, model_module, config_params): self.model_module = model_module self.config_params", "' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\", flush=True) return model, history def evaluate(self, trained_model, evaluation_input=None,", "Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for", "MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. 
CARNEGIE MELLON UNIVERSITY", "evaluation_output=None): \"\"\"Does an evaluation.\"\"\" print_and_log(\"EVALUATION\") print(\"Starting evaluation\", flush=True) if self.evaluation_input is not None:", "def split_and_train(self, dataset_instance): \"\"\"Splits a dataset and trains the configured model, returning it.\"\"\"", "Copyright 2021 The scikit-learn developers. # 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.", "if training_set.has_validation(): print_and_log(\"Validation data found\") validation_data = (training_set.x_validation, training_set.y_validation) callbacks = self.get_callbacks(patience=5) history", "different sets to train/validate.\"\"\" # Define the K-fold Cross Validator print_and_log(\"CROSS VALIDATION\") kfold", "self.evaluation_input if self.evaluation_output is not None: evaluation_output = self.evaluation_output if evaluation_input is None", "self.config_params.get(\"batch_size\") print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}') validation_data =", "to evaluate.\") batch_size = self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! Evaluation loss", "Carnegie Mellon University. # # This Software includes and/or makes use of the", "Regents of the University of California. # 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR", "trains the configured model, returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()},", "{len(training_set.x_validation[0])}') trained_model, history = self.train(training_set) # Store evaluation input/outputs as the validation split,", "2021 SciPy Developers. # 6. 
statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels", "# [DISTRIBUTION STATEMENT A] This material has been approved for public release and", "MATERIAL IS FURNISHED ON AN \"AS-IS\" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES", "<NAME>, Scipy developers, statsmodels Developers. # 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development", "to train a model\"\"\" def __init__(self, model_module, config_params): self.model_module = model_module self.config_params =", "the configured model, returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, '", "train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs = self.config_params.get(\"epochs\") batch_size = self.config_params.get(\"batch_size\")", "OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY", "case evaluation is done later. self.evaluation_input = training_set.x_validation self.evaluation_output = training_set.y_validation return trained_model", "print print('------------------------------------------------------------------------') print_and_log(f'Training for fold {fold_no} ...') training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index) #", "Team, and open source contributors. # 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn", "ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR", "Patent and Trademark Office by Carnegie Mellon University. # # This Software includes", "= trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! 
Evaluation loss and acc: {scores}') return scores def", "batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result ({len(history.history.get(\"loss\"))} epochs): ' f'loss: {history.history.get(\"loss\")[-1]}, ' f'accuracy: {history.history.get(\"accuracy\")[-1]}')", "1 for train_index, test_index in kfold.split(full_dataset.get_single_input(), full_dataset.get_output()): # Generate a print print('------------------------------------------------------------------------') print_and_log(f'Training", "DM22-0044 from sklearn.model_selection import KFold import tensorflow.keras.callbacks as tfcb from utils.logging import print_and_log", "WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT", "a dataset and trains the configured model, returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\"))", "self.evaluation_output if evaluation_input is None or evaluation_output is None: raise Exception(\"Evaluation input or", "CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN \"AS-IS\"", "' f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\",", "# # NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL", "RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE", "f'loss: {history.history.get(\"loss\")[-1]}, ' f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}')", "# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers. 
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt)", "INFRINGEMENT. # # Released under a MIT (SEI)-style license, please see license.txt or", "sets to train/validate.\"\"\" # Define the K-fold Cross Validator print_and_log(\"CROSS VALIDATION\") kfold =", "University. # # This Software includes and/or makes use of the following Third-Party", "from sklearn.model_selection import KFold import tensorflow.keras.callbacks as tfcb from utils.logging import print_and_log class", "= [] loss_per_fold = [] fold_no = 1 for train_index, test_index in kfold.split(full_dataset.get_single_input(),", "(https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.", "Evaluation loss and acc: {scores}') return scores def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold cross-validation", "PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. # # Released under a MIT (SEI)-style license,", "includes and/or makes use of the following Third-Party Software subject to its own", "under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full", "= None callbacks = None if training_set.has_validation(): print_and_log(\"Validation data found\") validation_data = (training_set.x_validation,", "model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result ({len(history.history.get(\"loss\"))} epochs): ' f'loss:", "100) loss_per_fold.append(scores[0]) # Increase fold number fold_no = fold_no + 1 print_and_log(\"Done with", "[es, msave] def train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs = self.config_params.get(\"epochs\")", "print(f'Done! 
Evaluation loss and acc: {scores}') return scores def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold", "evaluation_output is None: raise Exception(\"Evaluation input or output not passed properly to evaluate.\")", "* 100) loss_per_fold.append(scores[0]) # Increase fold number fold_no = fold_no + 1 print_and_log(\"Done", "model\"\"\" def __init__(self, model_module, config_params): self.model_module = model_module self.config_params = config_params self.evaluation_input =", "license.txt or contact <EMAIL> for full terms. # # [DISTRIBUTION STATEMENT A] This", "to model print_and_log(f'Training fold samples: {training_set.num_train_samples}') model, history = self.train(training_set) # Generate generalization", "number fold_no = fold_no + 1 print_and_log(\"Done with cross-validation!\") def split_and_train(self, dataset_instance): \"\"\"Splits", "and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.", "print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}') validation_data = None", "@staticmethod def get_callbacks(patience=2): \"\"\"Gets helper callbacks to save checkpoints and allow early stopping", "print_and_log(f'Final training result ({len(history.history.get(\"loss\"))} epochs): ' f'loss: {history.history.get(\"loss\")[-1]}, ' f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation():", "for fold {fold_no} ...') training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index) # Fit data to", "OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF", "This Software includes and/or makes use of the following Third-Party Software subject to", "contributors. # 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers. # 4. 
numpy", "\"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs = self.config_params.get(\"epochs\") batch_size = self.config_params.get(\"batch_size\") print_and_log(f'Starting training", "has been approved for public release and unlimited distribution. Please see Copyright notice", "= self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, ' f'training samples: {len(training_set.x_train[0])}, ' f'validation samples:", "test_index) # Fit data to model print_and_log(f'Training fold samples: {training_set.num_train_samples}') model, history =", "\"AS-IS\" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED", "self.get_callbacks(patience=5) history = model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result ({len(history.history.get(\"loss\"))}", "Copyright 2018 <NAME>, Scipy developers, statsmodels Developers. # 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016", "MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN \"AS-IS\" BASIS.", "self.evaluation_input is not None: evaluation_input = self.evaluation_input if self.evaluation_output is not None: evaluation_output", "return model, history def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None): \"\"\"Does an evaluation.\"\"\" print_and_log(\"EVALUATION\") print(\"Starting", "RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. # # Released under", "KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. # #", "stopping when needed.\"\"\" file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave =", "Mellon University. 
# # This Software includes and/or makes use of the following", "None callbacks = None if training_set.has_validation(): print_and_log(\"Validation data found\") validation_data = (training_set.x_validation, training_set.y_validation)", "self.model_module.get_fold_data(full_dataset, train_index, test_index) # Fit data to model print_and_log(f'Training fold samples: {training_set.num_train_samples}') model,", "f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\", flush=True)", "2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers. # 6.", "= self.get_callbacks(patience=5) history = model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result", "{history.history.get(\"loss\")[-1]}, ' f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done", "matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team. 
class ModelTrainer:
    """Trains, evaluates, and cross-validates a Keras model produced by a model module.

    The model module is expected to expose ``create_model()``, ``split_data()`` and
    ``get_fold_data()``; ``config_params`` is a dict-like object providing the
    "epochs", "batch_size" and "validation_size" hyper-parameters.
    """

    def __init__(self, model_module, config_params):
        self.model_module = model_module
        self.config_params = config_params
        # Validation split stored by split_and_train() so that evaluate() can be
        # called later without explicitly passing the data again.
        self.evaluation_input = None
        self.evaluation_output = None

    @staticmethod
    def get_callbacks(patience=2):
        """Gets helper callbacks to save checkpoints and allow early stopping when needed.

        :param patience: epochs without val_loss improvement before training stops.
        :returns: list of [EarlyStopping, ModelCheckpoint] Keras callbacks.
        """
        file_path = ".model_weights.hdf5"
        es = tfcb.EarlyStopping('val_loss', patience=patience, mode="min")
        msave = tfcb.ModelCheckpoint(file_path, save_best_only=True)
        return [es, msave]

    def train(self, training_set):
        """Creates a fresh model and fits it on the given training set.

        :param training_set: object exposing x_train/y_train and, when
            has_validation() is true, x_validation/y_validation.
        :returns: a (model, history) tuple from Keras ``fit``.
        """
        print_and_log("TRAINING")
        model = self.model_module.create_model()
        epochs = self.config_params.get("epochs")
        batch_size = self.config_params.get("batch_size")
        print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}')

        # Early stopping / checkpointing only make sense when a validation
        # split is available, since they monitor val_loss.
        validation_data = None
        callbacks = None
        if training_set.has_validation():
            print_and_log("Validation data found")
            validation_data = (training_set.x_validation, training_set.y_validation)
            callbacks = self.get_callbacks(patience=5)

        history = model.fit(training_set.x_train, training_set.y_train,
                            epochs=epochs,
                            validation_data=validation_data,
                            batch_size=batch_size,
                            callbacks=callbacks)
        print_and_log(f'Final training result ({len(history.history.get("loss"))} epochs): '
                      f'loss: {history.history.get("loss")[-1]}, '
                      f'accuracy: {history.history.get("accuracy")[-1]}')
        if training_set.has_validation():
            print_and_log(f'Validation: val_loss: {history.history.get("val_loss")[-1]}, '
                          f'val_accuracy: {history.history.get("val_accuracy")[-1]}')

        print("Done training!", flush=True)
        return model, history

    def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None):
        """Evaluates a trained model.

        Explicitly passed data takes precedence; when omitted, the validation
        split stored by split_and_train() is used as a fallback.

        :param trained_model: a fitted Keras model.
        :param evaluation_input: optional inputs to evaluate on.
        :param evaluation_output: optional expected outputs.
        :returns: the scores returned by Keras ``evaluate``.
        :raises ValueError: if no evaluation data is available from either source.
        """
        print_and_log("EVALUATION")
        print("Starting evaluation", flush=True)

        # BUGFIX: previously the stored split always overrode explicitly passed
        # arguments, so cross_validate() run after split_and_train() would
        # silently evaluate every fold on stale data. Explicit args now win.
        if evaluation_input is None:
            evaluation_input = self.evaluation_input
        if evaluation_output is None:
            evaluation_output = self.evaluation_output
        if evaluation_input is None or evaluation_output is None:
            raise ValueError("Evaluation input or output not passed properly to evaluate.")

        batch_size = self.config_params.get("batch_size")
        scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size)
        print(f'Done! Evaluation loss and acc: {scores}')
        return scores

    def cross_validate(self, full_dataset, num_folds=5):
        """k-fold cross-validation to check how model is performing by selecting different sets to train/validate.

        :param full_dataset: dataset exposing get_single_input()/get_output().
        :param num_folds: number of folds for KFold.
        :returns: (acc_per_fold, loss_per_fold) lists with one entry per fold.
        """
        # Define the K-fold Cross Validator.
        print_and_log("CROSS VALIDATION")
        kfold = KFold(n_splits=num_folds, shuffle=True)

        # Per-fold generalization metrics, now returned to the caller instead
        # of being computed and discarded.
        acc_per_fold = []
        loss_per_fold = []
        splits = kfold.split(full_dataset.get_single_input(), full_dataset.get_output())
        for fold_no, (train_index, test_index) in enumerate(splits, start=1):
            print('------------------------------------------------------------------------')
            print_and_log(f'Training for fold {fold_no} ...')

            training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index)

            # Fit data to model.
            print_and_log(f'Training fold samples: {training_set.num_train_samples}')
            model, _history = self.train(training_set)

            # Generate generalization metrics on this fold's held-out split.
            print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}')
            scores = self.evaluate(model, training_set.x_validation, training_set.y_validation)
            print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; '
                          f'{model.metrics_names[1]} of {scores[1] * 100}%')
            acc_per_fold.append(scores[1] * 100)
            loss_per_fold.append(scores[0])

        print_and_log("Done with cross-validation!")
        return acc_per_fold, loss_per_fold

    def split_and_train(self, dataset_instance):
        """Splits a dataset and trains the configured model, returning it.

        Also stores the validation split on the instance so a later call to
        evaluate() can run without explicit data.

        :param dataset_instance: dataset to be split by the model module.
        :returns: the trained model.
        """
        training_set = self.model_module.split_data(dataset_instance, self.config_params.get("validation_size"))
        print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, '
                      f'training samples: {len(training_set.x_train[0])}, '
                      f'validation samples: {len(training_set.x_validation[0])}')

        trained_model, _history = self.train(training_set)

        # Store evaluation input/outputs as the validation split, in case evaluation is done later.
        self.evaluation_input = training_set.x_validation
        self.evaluation_output = training_set.y_validation
        return trained_model
scipy", "history = self.train(training_set) # Store evaluation input/outputs as the validation split, in case", "return [es, msave] def train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs =", "check how model is performing by selecting different sets to train/validate.\"\"\" # Define", "(https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers. # 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright", "WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE", "# Store evaluation input/outputs as the validation split, in case evaluation is done", "config_params): self.model_module = model_module self.config_params = config_params self.evaluation_input = None self.evaluation_output = None", "def get_callbacks(patience=2): \"\"\"Gets helper callbacks to save checkpoints and allow early stopping when", "# # DM22-0044 from sklearn.model_selection import KFold import tensorflow.keras.callbacks as tfcb from utils.logging", "acc_per_fold.append(scores[1] * 100) loss_per_fold.append(scores[0]) # Increase fold number fold_no = fold_no + 1", "samples: {training_set.num_validation_samples}') scores = self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of", "= self.config_params.get(\"batch_size\") print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}') validation_data", "loss_per_fold.append(scores[0]) # Increase fold number fold_no = fold_no + 1 print_and_log(\"Done with cross-validation!\")", "generalization metrics print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}') scores = self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for", "with 
cross-validation!\") def split_and_train(self, dataset_instance): \"\"\"Splits a dataset and trains the configured model,", "EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES", "the University of California. # 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management,", "when needed.\"\"\" file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave = tfcb.ModelCheckpoint(file_path,", "PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE", "evaluation\", flush=True) if self.evaluation_input is not None: evaluation_input = self.evaluation_input if self.evaluation_output is", "TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM", "and trains the configured model, returning it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples", "needed.\"\"\" file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave = tfcb.ModelCheckpoint(file_path, save_best_only=True)", "save checkpoints and allow early stopping when needed.\"\"\" file_path = \".model_weights.hdf5\" es =", "utils.logging import print_and_log class ModelTrainer: \"\"\"Has functionalities to train a model\"\"\" def __init__(self,", "patience=patience, mode=\"min\") msave = tfcb.ModelCheckpoint(file_path, save_best_only=True) return [es, msave] def train(self, training_set): \"\"\"Train.\"\"\"", "Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team,", "print_and_log(\"CROSS VALIDATION\") kfold = KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation model evaluation acc_per_fold", "the following Third-Party Software subject to its own license: # 1. 
Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE)", "Developers. # 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers. #", "dataset_instance): \"\"\"Splits a dataset and trains the configured model, returning it.\"\"\" training_set =", "# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc.", "Office by Carnegie Mellon University. # # This Software includes and/or makes use", "SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN \"AS-IS\" BASIS. CARNEGIE MELLON UNIVERSITY", "output not passed properly to evaluate.\") batch_size = self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output,", "FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL.", "self.train(training_set) # Store evaluation input/outputs as the validation split, in case evaluation is", "= KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation model evaluation acc_per_fold = [] loss_per_fold", "and allow early stopping when needed.\"\"\" file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience,", "= model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result ({len(history.history.get(\"loss\"))} epochs): '", "= self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; ' f'{model.metrics_names[1]}", "metrics print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}') scores = self.evaluate(model, training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold", "NO WARRANTY. 
THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED", "model, history def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None): \"\"\"Does an evaluation.\"\"\" print_and_log(\"EVALUATION\") print(\"Starting evaluation\",", "result ({len(history.history.get(\"loss\"))} epochs): ' f'loss: {history.history.get(\"loss\")[-1]}, ' f'accuracy: {history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss:", "it.\"\"\" training_set = self.model_module.split_data(dataset_instance, self.config_params.get(\"validation_size\")) print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, ' f'training samples: {len(training_set.x_train[0])}, '", "team. # # DM22-0044 from sklearn.model_selection import KFold import tensorflow.keras.callbacks as tfcb from", "# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright", "developers, statsmodels Developers. # 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team. #", "see Copyright notice for non-US Government use and distribution. # # Carnegie Mellon®", "batch_size = self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! Evaluation loss and acc:", "BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR", "Define the K-fold Cross Validator print_and_log(\"CROSS VALIDATION\") kfold = KFold(n_splits=num_folds, shuffle=True) # K-fold", "Software subject to its own license: # 1. 
Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The", "not passed properly to evaluate.\") batch_size = self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size)", "save_best_only=True) return [es, msave] def train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs", "LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED", "tensorflow.keras.callbacks as tfcb from utils.logging import print_and_log class ModelTrainer: \"\"\"Has functionalities to train", "batch size: {batch_size}') validation_data = None callbacks = None if training_set.has_validation(): print_and_log(\"Validation data", "data to model print_and_log(f'Training fold samples: {training_set.num_train_samples}') model, history = self.train(training_set) # Generate", "own license: # 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University", "= self.config_params.get(\"epochs\") batch_size = self.config_params.get(\"batch_size\") print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch", "KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation model evaluation acc_per_fold = [] loss_per_fold =", "Copyright 2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers. #", "validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result ({len(history.history.get(\"loss\"))} epochs): ' f'loss: {history.history.get(\"loss\")[-1]}, ' f'accuracy:", "Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie", "CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,", "TRADEMARK, OR COPYRIGHT INFRINGEMENT. 
# # Released under a MIT (SEI)-style license, please", "AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR", "tfcb from utils.logging import print_and_log class ModelTrainer: \"\"\"Has functionalities to train a model\"\"\"", "{scores[0]}; ' f'{model.metrics_names[1]} of {scores[1] * 100}%') acc_per_fold.append(scores[1] * 100) loss_per_fold.append(scores[0]) # Increase", "The Regents of the University of California. # 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021", "or evaluation_output is None: raise Exception(\"Evaluation input or output not passed properly to", "Exception(\"Evaluation input or output not passed properly to evaluate.\") batch_size = self.config_params.get(\"batch_size\") scores", "print_and_log(f'Training for fold {fold_no} ...') training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index) # Fit data", "# # Carnegie Mellon® is registered in the U.S. Patent and Trademark Office", "full_dataset, num_folds=5): \"\"\"k-fold cross-validation to check how model is performing by selecting different", "\"\"\"Gets helper callbacks to save checkpoints and allow early stopping when needed.\"\"\" file_path", "self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! 
Evaluation loss and acc: {scores}') return", "[DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited", "kfold.split(full_dataset.get_single_input(), full_dataset.get_output()): # Generate a print print('------------------------------------------------------------------------') print_and_log(f'Training for fold {fold_no} ...') training_set", "not None: evaluation_input = self.evaluation_input if self.evaluation_output is not None: evaluation_output = self.evaluation_output", "MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK,", "# K-fold Cross Validation model evaluation acc_per_fold = [] loss_per_fold = [] fold_no", "file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss', patience=patience, mode=\"min\") msave = tfcb.ModelCheckpoint(file_path, save_best_only=True) return", "K-fold Cross Validator print_and_log(\"CROSS VALIDATION\") kfold = KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation", "(SEI)-style license, please see license.txt or contact <EMAIL> for full terms. # #", "# Copyright 2022 Carnegie Mellon University. # # NO WARRANTY. THIS CARNEGIE MELLON", "following Third-Party Software subject to its own license: # 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright", "release and unlimited distribution. Please see Copyright notice for non-US Government use and", "Cross Validator print_and_log(\"CROSS VALIDATION\") kfold = KFold(n_splits=num_folds, shuffle=True) # K-fold Cross Validation model", "def train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model = self.model_module.create_model() epochs = self.config_params.get(\"epochs\") batch_size =", "Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData", "subject to its own license: # 1. 
Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents", "2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and", "num_folds=5): \"\"\"k-fold cross-validation to check how model is performing by selecting different sets", "performing by selecting different sets to train/validate.\"\"\" # Define the K-fold Cross Validator", "{history.history.get(\"accuracy\")[-1]}') if training_set.has_validation(): print_and_log(f'Validation: val_loss: {history.history.get(\"val_loss\")[-1]}, ' f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\", flush=True) return", "# Generate generalization metrics print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}') scores = self.evaluate(model, training_set.x_validation, training_set.y_validation)", "f'val_accuracy: {history.history.get(\"val_accuracy\")[-1]}') print(\"Done training!\", flush=True) return model, history def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None):", "epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks) print_and_log(f'Final training result ({len(history.history.get(\"loss\"))} epochs): ' f'loss: {history.history.get(\"loss\")[-1]}, '", "Production MLSystems - Code # Copyright 2022 Carnegie Mellon University. # # NO", "(training_set.x_validation, training_set.y_validation) callbacks = self.get_callbacks(patience=5) history = model.fit(training_set.x_train, training_set.y_train, epochs=epochs, validation_data=validation_data, batch_size=batch_size, callbacks=callbacks)", "as the validation split, in case evaluation is done later. self.evaluation_input = training_set.x_validation", "Copyright 2022 Carnegie Mellon University. # # NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY", "The scikit-learn developers. # 4. 
numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5.", "passed properly to evaluate.\") batch_size = self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done!", "= self.evaluation_input if self.evaluation_output is not None: evaluation_output = self.evaluation_output if evaluation_input is", "checkpoints and allow early stopping when needed.\"\"\" file_path = \".model_weights.hdf5\" es = tfcb.EarlyStopping('val_loss',", "for train_index, test_index in kfold.split(full_dataset.get_single_input(), full_dataset.get_output()): # Generate a print print('------------------------------------------------------------------------') print_and_log(f'Training for", "Drift Detection in Production MLSystems - Code # Copyright 2022 Carnegie Mellon University.", "4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021", "KFold import tensorflow.keras.callbacks as tfcb from utils.logging import print_and_log class ModelTrainer: \"\"\"Has functionalities", "and acc: {scores}') return scores def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold cross-validation to check", "100}%') acc_per_fold.append(scores[1] * 100) loss_per_fold.append(scores[0]) # Increase fold number fold_no = fold_no +", "{batch_size}') validation_data = None callbacks = None if training_set.has_validation(): print_and_log(\"Validation data found\") validation_data", "trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! Evaluation loss and acc: {scores}') return scores def cross_validate(self,", "7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team. 
# # DM22-0044 from sklearn.model_selection", "found\") validation_data = (training_set.x_validation, training_set.y_validation) callbacks = self.get_callbacks(patience=5) history = model.fit(training_set.x_train, training_set.y_train, epochs=epochs,", "MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms. #", "MLSystems - Code # Copyright 2022 Carnegie Mellon University. # # NO WARRANTY.", "NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER", "statsmodels Developers. # 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team. # #", "training_set.x_validation, training_set.y_validation) print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; ' f'{model.metrics_names[1]} of {scores[1]", "developers. # 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt)", "= None self.evaluation_output = None @staticmethod def get_callbacks(patience=2): \"\"\"Gets helper callbacks to save", "contact <EMAIL> for full terms. 
# # [DISTRIBUTION STATEMENT A] This material has", "self.model_module = model_module self.config_params = config_params self.evaluation_input = None self.evaluation_output = None @staticmethod", "fold samples: {training_set.num_train_samples}') model, history = self.train(training_set) # Generate generalization metrics print_and_log(f'Evaluation fold", "epochs = self.config_params.get(\"epochs\") batch_size = self.config_params.get(\"batch_size\") print_and_log(f'Starting training with hyper parameters: epochs: {epochs},", "def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None): \"\"\"Does an evaluation.\"\"\" print_and_log(\"EVALUATION\") print(\"Starting evaluation\", flush=True) if", "sklearn.model_selection import KFold import tensorflow.keras.callbacks as tfcb from utils.logging import print_and_log class ModelTrainer:", "None self.evaluation_output = None @staticmethod def get_callbacks(patience=2): \"\"\"Gets helper callbacks to save checkpoints", "__init__(self, model_module, config_params): self.model_module = model_module self.config_params = config_params self.evaluation_input = None self.evaluation_output", "# Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code", "(https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development", "numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5. 
scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy", "samples {dataset_instance.get_number_of_samples()}, ' f'training samples: {len(training_set.x_train[0])}, ' f'validation samples: {len(training_set.x_validation[0])}') trained_model, history =", "is None: raise Exception(\"Evaluation input or output not passed properly to evaluate.\") batch_size", "None or evaluation_output is None: raise Exception(\"Evaluation input or output not passed properly", "None @staticmethod def get_callbacks(patience=2): \"\"\"Gets helper callbacks to save checkpoints and allow early", "None: evaluation_output = self.evaluation_output if evaluation_input is None or evaluation_output is None: raise", "full terms. # # [DISTRIBUTION STATEMENT A] This material has been approved for", "acc_per_fold = [] loss_per_fold = [] fold_no = 1 for train_index, test_index in", "print_and_log(\"Validation data found\") validation_data = (training_set.x_validation, training_set.y_validation) callbacks = self.get_callbacks(patience=5) history = model.fit(training_set.x_train,", "f'validation samples: {len(training_set.x_validation[0])}') trained_model, history = self.train(training_set) # Store evaluation input/outputs as the", "properly to evaluate.\") batch_size = self.config_params.get(\"batch_size\") scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size) print(f'Done! Evaluation", "approved for public release and unlimited distribution. Please see Copyright notice for non-US", "is not None: evaluation_input = self.evaluation_input if self.evaluation_output is not None: evaluation_output =", "material has been approved for public release and unlimited distribution. 
Please see Copyright", "of {scores[1] * 100}%') acc_per_fold.append(scores[1] * 100) loss_per_fold.append(scores[0]) # Increase fold number fold_no", "= tfcb.ModelCheckpoint(file_path, save_best_only=True) return [es, msave] def train(self, training_set): \"\"\"Train.\"\"\" print_and_log(\"TRAINING\") model =", "if self.evaluation_input is not None: evaluation_input = self.evaluation_input if self.evaluation_output is not None:", "EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY", "the U.S. Patent and Trademark Office by Carnegie Mellon University. # # This", "Fit data to model print_and_log(f'Training fold samples: {training_set.num_train_samples}') model, history = self.train(training_set) #", "a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.", "Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source", "flush=True) return model, history def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None): \"\"\"Does an evaluation.\"\"\" print_and_log(\"EVALUATION\")", "{scores}') return scores def cross_validate(self, full_dataset, num_folds=5): \"\"\"k-fold cross-validation to check how model", "flush=True) if self.evaluation_input is not None: evaluation_input = self.evaluation_input if self.evaluation_output is not", "Foundry, Inc. and PyData Development Team, and open source contributors. # 3. scikit-learn", "self.evaluation_output is not None: evaluation_output = self.evaluation_output if evaluation_input is None or evaluation_output", "...') training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index) # Fit data to model print_and_log(f'Training fold", "evaluation input/outputs as the validation split, in case evaluation is done later. self.evaluation_input", "THE MATERIAL. 
CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND", "Towards Realistic Drift Detection in Production MLSystems - Code # Copyright 2022 Carnegie", "a print print('------------------------------------------------------------------------') print_and_log(f'Training for fold {fold_no} ...') training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index)" ]
[ "request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\")", "if date.day=='sunday': import datetime today = datetime.date.today() sunday = today + datetime.timedelta((6 -", "return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting'))", "datetime.timedelta((3 - today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit()", "render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error)", "render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50),", "request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event", "busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = 
request.form.get('busname') CITY =", "text #from flaskblog import #from flaskblog.models import User, Posts #from flaskblog.forms import RegistrationForm,", "- today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if", "Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db", "= request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday)", "unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False,", "def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id =", "@app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname')", "DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None", "return render_template('name.html') @app.route('/busesdata' 
,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global", "db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate", "flaskblog.models import User, Posts #from flaskblog.forms import RegistrationForm, LoginForm import os #import login_user", "global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname')", "class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False,", "def schedule(): event2=Busesdata.query.all() for event in event2: image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii')", "Posts #from flaskblog.forms import RegistrationForm, LoginForm import os #import login_user #from flask import", "= Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today = datetime.date.today()", "#from flask import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_", ",error=error) else: 
entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata():", "DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\" \" or EMAIL==\"", "BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\")", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today = datetime.date.today() thursday = today", "person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat) if today2", "#login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True)", "date)).all() for seat1 in seat: if seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html',", "render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error = None if request.method == 'POST':", "render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no 
person selected", "DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return", "!= 'admin' or request.form['password'] != '<PASSWORD>': error = 'Invalid Credentials. Please try again.'", "datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit()", "session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event = Busesdata.query.all()", "username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event =", "b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET'])", "@app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return", "ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return 
render_template('mybooking.html',X=X,date=date) @app.route('/schedule1')", "flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from flask.ext.login import LoginManager", "or SEATS==None or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY)", "request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html',", "FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall)", "Flask, render_template,request, url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io import BytesIO from sqlalchemy.orm", "TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return", 
"update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET'])", "print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today", "x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat) if today2 != today: update5", "schedule(): event2=Busesdata.query.all() for event in event2: image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return", "bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today =", "== date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1 in seat: if seat1==None: return", "SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\")", "= datetime.date.today() tuesday = today + datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1", "Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today = 
datetime.date.today() saterday", "@app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\")", "entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today =", "#booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return", "#prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True)", "datetime today = datetime.date.today() friday = today + datetime.timedelta((4 - today.weekday() % 7))", "nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"])", "if request.method == 'POST': if request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>': error", "global city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if", "import datetime today = datetime.date.today() sunday = today + datetime.timedelta((6 - today.weekday() %", "def booking1(): global person noperson=\"no person selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all()", "BUS AVELABLE\" #return 
render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat)", "# n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error = None", "Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today = datetime.date.today() monday = today +", "db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first() image", "noperson=\"no person selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1:", "file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or", "image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of", "request.method == 'POST': if request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>': error =", "from sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode import base64 from sqlalchemy import", "import func import sqlite3 from sqlalchemy.sql import text #from flaskblog import #from flaskblog.models", "Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), 
unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True)", "z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday ==", "#from flaskblog import #from flaskblog.models import User, Posts #from flaskblog.forms import RegistrationForm, LoginForm", "Month==\" \" or GANDER==\" \" or PASSWORD==\" \" or file==\" \": return render_template('name.html'", "loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no person selected \"", "CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT})", "7)) print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime", "type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def", 
"datetime.timedelta((0 - today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5", "location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats))))", "= datetime.date.today() thursday = today + datetime.timedelta((3 - today.weekday() % 7)) print(thursday) entry1", "seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10),", "dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id)", "seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT", "tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today = datetime.date.today() sunday =", "unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False,", "nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) 
ticket_per_seat=db.Column(db.String(50),", "base64 from sqlalchemy import func import sqlite3 from sqlalchemy.sql import text #from flaskblog", "ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary)", "person noperson=\"no person selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in", "update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type", "or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html',", "or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit()", "today + datetime.timedelta((3 - today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday) update5 =", "flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from flask.ext.login", 
"entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image(): event", "city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday)", "or request.form['password'] != '<PASSWORD>': error = 'Invalid Credentials. Please try again.' else: session['logged_in']=True", "= Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today = datetime.date.today()", "friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method ==", "if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person", "tuesday = today + datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday)", "event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return", "db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x})", "= 
SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True)", "#return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True)", "b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection: print('a') #number_trained = db.session.execute(text(\"select", "unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True)", "busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True)", "today + datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today = datetime.date.today() saterday = today", "X = int(person) * int(seat.ticket_per_seat) if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'})", "datetime today = datetime.date.today() monday = today + datetime.timedelta((0 - today.weekday() % 7))", "entry1 = Busesdata(date=tuesday) db.session.commit() update5 = 
Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today =", "= Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X)", "datetime.date.today() friday = today + datetime.timedelta((4 - today.weekday() % 7)) print(friday) entry1 =", "today + datetime.timedelta((4 - today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday) update5 =", "seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET'])", "render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def", "% 7)) print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import", "friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today = datetime.date.today() sunday = today +", "#from flaskblog.forms import RegistrationForm, LoginForm import os #import login_user #from flask import flask_login", "date=name1.bookingdate #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date 
,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST'])", "'admin' or request.form['password'] != '<PASSWORD>': error = 'Invalid Credentials. Please try again.' else:", "request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no", "buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login():", "!= today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if", "import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from", "WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None :", "unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True)", "flask_sqlalchemy import SQLAlchemy from io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64", "today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' 
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key =", "return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1:", "for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection: print('a')", "= <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50),", "datetime.date.today() monday = today + datetime.timedelta((0 - today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7)", "type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT", "\" or LAST==\" \" or EMAIL==\" \" or Month==\" \" or GANDER==\" \"", "Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def", "+ datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\"", "#name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20)", "print(saterday) update5 = 
Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date']", "SQLAlchemy from io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode", "import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin import admin_blueprint", "LAST==\" \" or EMAIL==\" \" or Month==\" \" or GANDER==\" \" or PASSWORD==\"", "today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday':", "update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person)", "% 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import", "return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first() image =", "datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if", "- today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 =", "Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) 
money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate", "today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday':", "or GANDER==\" \" or PASSWORD==\" \" or file==\" \": return render_template('name.html' ,error=error) else:", "id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday ==", "% 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit()", "day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats')", "#import login_user #from flask import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy", "= Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime today = datetime.date.today()", "- today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if", "Please try again.' 
else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def", "busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city global day", "money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return", "return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats))))", "print(monday) entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today", ",methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats')", "person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday ==", "\" or Month==\" \" or GANDER==\" \" or PASSWORD==\" \" or file==\" \":", "entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today =", "SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city global day global type 
busname1=select.busname", "app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True,", "SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50),", "busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50),", "in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error", "date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1 in seat: if seat1==None: return render_template('booking.html',", "request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1,", "or file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) 
db.session.add(entry) db.session.commit() return render_template('name.html') return", "collection: print('a') #number_trained = db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all()", "if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate)", "today = datetime.date.today() friday = today + datetime.timedelta((4 - today.weekday() % 7)) print(friday)", "= today + datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5", "def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id):", "@app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in event2:", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime today = datetime.date.today() friday = today", "db.session.commit() if date.friday=='friday': import datetime today = datetime.date.today() friday = today + datetime.timedelta((4", "collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED", "render_template('busesdata.html') @app.route('/index') def 
image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii')", "buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id =", "db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses", "unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def", "date.friday=='friday': import datetime today = datetime.date.today() friday = today + datetime.timedelta((4 - today.weekday()", "print(today2) X = int(person) * int(seat.ticket_per_seat) if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats:", "== date)).all() for seat1 in seat: if seat1==None: return render_template('booking.html', rows=seat) else: return", "sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode import base64 from sqlalchemy import func", "def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city global", "update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) 
update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1,", "return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no person selected \" person=request.form.get('person')", "render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: #", "again.' else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event", "added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city global day global", "SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type')", "db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE", "monday = today + datetime.timedelta((0 - today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday)", "= b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, 
image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage',", "SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else:", "nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return", "print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today", "x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat) if today2 !=", "else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday", "None if request.method == 'POST': if request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>':", "download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule():", "def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') 
@app.route(\"/schedule\") def", "render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET'])", "render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in event2: image = b64encode(event.ima) image", "logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from flask.ext.login import LoginManager #lm = LoginManager()", "= 'login' #from app.admin import admin_blueprint from datetime import datetime today=datetime.now print(today) app", "= today + datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday) update5", "from flask import Flask, render_template,request, url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io import", "from io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode import", "import datetime today = datetime.date.today() monday = today + datetime.timedelta((0 - today.weekday() %", "from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in", "update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all()", "def loginreal(): invalid=\"invlid username of password\" 
if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted", "TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1,", "render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\":", "today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday':", "nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True)", "Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime today = datetime.date.today() friday = today +", "Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from flask.ext.login import LoginManager #lm =", "nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, 
autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True)", "x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return", "image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image)", "in collection: print('a') #number_trained = db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first()) #print(sum)", "!= '<PASSWORD>': error = 'Invalid Credentials. Please try again.' else: session['logged_in']=True return render_template('welcomeadmin.html')", "BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT})", "render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday ==", "nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True)", "return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def", 
"render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal()", "if date.thursday=='thursday': import datetime today = datetime.date.today() thursday = today + datetime.timedelta((3 -", "year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False,", "unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False,", "today = datetime.date.today() saterday = today + datetime.timedelta((5 - today.weekday() % 7)) print(saterday)", "monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def", "LoginForm import os #import login_user #from flask import flask_login #from flask import Login_Manager,", "today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": #return", "nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) 
@app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today", "for event in event2: image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('schedule.html' ,rows=event2,data=list,image=image)", "date.thursday=='thursday': import datetime today = datetime.date.today() thursday = today + datetime.timedelta((3 - today.weekday()", "print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime today", "import datetime today = datetime.date.today() wednesday = today + datetime.timedelta((2 - today.weekday() %", "rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS", "== date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday ==", "return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday", "tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home():", "unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) 
@app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\"", "nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all()", "in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id", "mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if", "ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True)", "unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello():", "TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") 
THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if", "render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no person selected \" person=request.form.get('person') city", "LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\"", "% 7)) print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import", "import text #from flaskblog import #from flaskblog.models import User, Posts #from flaskblog.forms import", "error = 'Invalid Credentials. Please try again.' else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return", "TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html')", "admin_blueprint from datetime import datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS']", "day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME =", "@app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') 
Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year')", "saterday = today + datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday})", "error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if", "in seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person)", "seat in seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X =", "type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first()", "@app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city", "render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\":", "#for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id):", "from flask_sqlalchemy import SQLAlchemy from io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from", ": return 
render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return", "Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def", "import scoped_session,sessionmaker from base64 import b64encode import base64 from sqlalchemy import func import", "person selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date,", "b64encode import base64 from sqlalchemy import func import sqlite3 from sqlalchemy.sql import text", "if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True", "== 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\"", "@app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first()", "@app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if 
request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking',", "print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today", "nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True)", "Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday", "AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return", "if date.monday=='monday': import datetime today = datetime.date.today() monday = today + datetime.timedelta((0 -", "b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection: print('a') #number_trained", "added=added) return render_template('busesdata.html') @app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image", "email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True)", "or_ print(\"golu\") #from flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login'", "import datetime today = 
datetime.date.today() tuesday = today + datetime.timedelta((1 - today.weekday() %", "base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\" if", "datetime.date.today() tuesday = today + datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1 =", "or EMAIL==\" \" or Month==\" \" or GANDER==\" \" or PASSWORD==\" \" or", "entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today =", "entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for", "TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None", "day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer,", "= Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id", "def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = 
datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g')", "saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'):", "unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class", "methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in", "= None if request.method == 'POST': if request.form['user'] != 'admin' or request.form['password'] !=", "\" or file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html')", "#from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from flask.ext.login import", "date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday)", "MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or 
TICKECT_PER_SEAT==None or", "datetime today = datetime.date.today() thursday = today + datetime.timedelta((3 - today.weekday() % 7))", "booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def", "methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no person selected \" person=request.form.get('person') city = request.form.get('city')", "of password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid)", "THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None : return", "viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first()", "render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event)", "#from flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin", "= int(person) * int(seat.ticket_per_seat) if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5", "password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) 
gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False,", "(request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if", "collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection: print('a') #number_trained = db.session.execute(text(\"select sum seats", "#day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for", "SEATS==None or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1)", "app.admin import admin_blueprint from datetime import datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']=", "base64 import b64encode import base64 from sqlalchemy import func import sqlite3 from sqlalchemy.sql", "None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image =", "Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": 
#return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city')", "person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday ==", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z", "#from flaskblog.models import User, Posts #from flaskblog.forms import RegistrationForm, LoginForm import os #import", "@app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday =", "nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False,", "render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day')", "print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 
'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>'", "request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson)", "'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \"", "session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username)", "buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error =", "event2=Busesdata.query.all() for event in event2: image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('schedule.html'", "day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST'])", "redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED 
SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat", "db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False,", "selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id)", ",primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False,", "print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50),", "@app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in event2: image = b64encode(event.ima) image =", "for date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday =", "class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False,", "if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) 
#delete=Booking.query.filter_by(id=id).delete()", "datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday})", "EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city global day global type", "= b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid", "wednesday = today + datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday)", "int(seat.ticket_per_seat) if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today})", "in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection: print('a') #number_trained =", "b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection", "PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\" \" or EMAIL==\" \" or Month==\"", "redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat", "today = datetime.date.today() monday = today + datetime.timedelta((0 - today.weekday() % 7)) if", "def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in event2: image", "render_template('booking.html',error=error) #print(x) 
#name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class", "print(\"golu\") #from flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from", "func import sqlite3 from sqlalchemy.sql import text #from flaskblog import #from flaskblog.models import", "unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking',", "return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\" if request.method=='POST':", "Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday = today +", "select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat", "file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all()", "nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True)", "EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') 
GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\" \"", "+ datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday})", "if date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday = today + datetime.timedelta((2 -", "GANDER==\" \" or PASSWORD==\" \" or file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today)", "import Flask, render_template,request, url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io import BytesIO from", "from sqlalchemy.sql import text #from flaskblog import #from flaskblog.models import User, Posts #from", "db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first()", "Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True)", "print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today", "if request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>': error = 'Invalid Credentials. 
Please", "= Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return", "password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first()", "SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if person==0: x=request.form.get('person')", "- today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if", "unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False,", "(url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global ticket_per_seat global", "import User, Posts #from flaskblog.forms import RegistrationForm, LoginForm import os #import login_user #from", "\": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata'", "sqlalchemy.sql import text #from flaskblog import #from flaskblog.models import User, Posts #from flaskblog.forms", "\" or GANDER==\" 
\" or PASSWORD==\" \" or file==\" \": return render_template('name.html' ,error=error)", "name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date", "primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False,", "Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses:", "'Invalid Credentials. Please try again.' else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error)", "added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city')", "ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html')", "entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime today =", "print(MONDAY) 
entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image():", "return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii')", "\" or PASSWORD==\" \" or file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry)", "<KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False,", "def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat')", "Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today = datetime.date.today() thursday = today + datetime.timedelta((3", "or PASSWORD==\" \" or file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit()", "LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin import admin_blueprint from datetime import datetime", "monday=print(date.monday) tuesday=print(date.tuesday) 
wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today = datetime.date.today() sunday", "ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima)", "hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email')", "PASSWORD==\" \" or file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return", "nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date):", "methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if", "today + datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5", "city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\":", "today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 = 
Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday})", "= Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj')", "= Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html')", "ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image", "= request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata():", "date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day ==", "render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city')", "- today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) 
db.session.commit() error=\"select person\" if request.method==\"POST\":", "== date,Busesdata.saterday == date)).all() for seat1 in seat: if seat1==None: return render_template('booking.html', rows=seat)", "render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO", "'50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname", "update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1)", "BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE,", "primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False,", "entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS 
SUCESSFULLY", "methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"])", "def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1():", "TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date)", "where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses: #", "= Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today = datetime.date.today()", "= today + datetime.timedelta((0 - today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1", "CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def", "base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') 
@app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage():", "date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today = datetime.date.today() saterday =", "request.form['password'] != '<PASSWORD>': error = 'Invalid Credentials. Please try again.' else: session['logged_in']=True return", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday = today", "file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added)", "global ticket_per_seat global city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day", "#date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True", "return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in event2: image = b64encode(event.ima)", "== date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all() for", "app.secret_key 
= <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer, primary_key=True, autoincrement=True)", "datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date", "YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\" \" or EMAIL==\" \"", "Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\" \" or", "if date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday = today + datetime.timedelta((1 -", "flaskblog.forms import RegistrationForm, LoginForm import os #import login_user #from flask import flask_login #from", "home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password')", "wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if", "if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday", "date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday = today + 
datetime.timedelta((2 - today.weekday()", "@app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat", "day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today = datetime.date.today()", "= id).first() for collection in collection: print('a') #number_trained = db.session.execute(text(\"select sum seats from", "send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event", "io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode import base64", "today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if person==0: x=request.form.get('person') print(x)", "import #from flaskblog.models import User, Posts #from flaskblog.forms import RegistrationForm, LoginForm import os", "== date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1 in", "db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in", "global busname global ticket_per_seat global city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats", "event in event2: image = b64encode(event.ima) image = 
base64.b64encode(event.ima).decode('ascii') return render_template('schedule.html' ,rows=event2,data=list,image=image) app.run(debug=True)", "date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True)", "global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME", ",methods=['GET','POST']) def login(): error = None if request.method == 'POST': if request.form['user'] !=", "city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image(): event =", "Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday", "flask import Flask, render_template,request, url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io import BytesIO", "today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\":", "os #import login_user #from flask import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from", "7)) print(friday) entry1 = Busesdata(date=friday) update5 = 
Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime", "gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True)", "+ datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday})", "= today + datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit()", "render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\" if request.method=='POST': username=request.form['username']", "seat1 in seat: if seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1)))", "app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager()", "#print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city", ",nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50), unique=False, nullable=True)", "sunday = today + 
datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday)", "print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat)", "today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit()", "render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html')", "id).first() for collection in collection: print('a') #number_trained = db.session.execute(text(\"select sum seats from Bookingdata", "dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS", "from sqlalchemy import or_ print(\"golu\") #from flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app)", "#print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error = None if request.method", "date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1", "= today + datetime.timedelta((4 - today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday) update5", "month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) 
date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True)", "if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima']", "id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection:", "CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or", "in seat: if seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return", "def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html'", "b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username", "event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu')", "date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class 
Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50),", "date.saterday=='saterday': import datetime today = datetime.date.today() saterday = today + datetime.timedelta((5 - today.weekday()", "#type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False,", "city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True)", "person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday)", "Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday = today +", "else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event =", "print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname", "SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') 
TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\")", "import base64 from sqlalchemy import func import sqlite3 from sqlalchemy.sql import text #from", "image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal():", "RegistrationForm, LoginForm import os #import login_user #from flask import flask_login #from flask import", "7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime", "date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all()", "seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit()", "n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for", "request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) 
tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday)", "date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1 in seat: if seat1==None:", "date in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday':", "nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False,", "@app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1:", "today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday':", "else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list,", "update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def", "return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') 
def dataediting(): event = Busesdata.query.all() return", "print(friday) entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today", "for collection in collection: print('a') #number_trained = db.session.execute(text(\"select sum seats from Bookingdata where", "if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None : return render_template('busesdata',selectall=selectall) else: print(MONDAY)", "#error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html',", "#print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model):", "import SQLAlchemy from io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64 import", "import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\") #from flask.ext.login import LoginManager #lm", "password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return render_template('loginreal.html',invalid=invalid) else:", "if date.friday=='friday': import datetime today = datetime.date.today() friday = today + datetime.timedelta((4 -", "buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for", "return render_template('booking.html', rows=seat,date=date) 
#(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return", "@app.route('/admin' ,methods=['GET','POST']) def login(): error = None if request.method == 'POST': if request.form['user']", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today = datetime.date.today() monday =", "#print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n)", "import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode import base64 from", "username of password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None: return", "busname global ticket_per_seat global city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city", "return render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html')", "= request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) 
update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return", "db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\"", "datetime today = datetime.date.today() sunday = today + datetime.timedelta((6 - today.weekday() % 7))", "firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10),", "today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat) if today2 != today: update5 =", "return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST'])", "selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\")", "int(person) * int(seat.ticket_per_seat) if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 =", "image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\" if request.method=='POST': username=request.form['username'] password=request.form['password']", "#print(n) return render_template('schedule1.html' 
,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b", "render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima)", "app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class", "entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today =", "= Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET'])", "date.day=='sunday': import datetime today = datetime.date.today() sunday = today + datetime.timedelta((6 - today.weekday()", "return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT", "import sqlite3 from sqlalchemy.sql import text #from flaskblog import #from flaskblog.models import User,", "#lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin import admin_blueprint from datetime", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday = today", "today + datetime.timedelta((0 - today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 =", "db.session.commit() if date.saterday=='saterday': 
import datetime today = datetime.date.today() saterday = today + datetime.timedelta((5", "BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return", "return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global busname global", "print('a') #number_trained = db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1=", "today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday':", "unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True)", "render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id):", "TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None", "if person==0: 
x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat) if", "nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True,", "if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday':", "date.monday=='monday': import datetime today = datetime.date.today() monday = today + datetime.timedelta((0 - today.weekday()", "datetime.timedelta((4 - today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit()", "person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0':", "#return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no person", "if seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x)", "url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io import BytesIO from sqlalchemy.orm import scoped_session,sessionmaker", "Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses in buses: # 
n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection)", "error=error) @app.route('/dataediting') def dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete()", "def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG'", "thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method", "event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal',", "#return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date", "nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50),", "seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) 
#print(x) #if", "seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday == date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday", "#(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname", "image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\": #return", "PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull)", "print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) * int(seat.ticket_per_seat) if today2 != today:", "nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True) date=db.Column(db.String(40),unique=False, nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model):", "for seat in seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X", "= datetime.date.today() sunday = today + datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1", "request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) 
db.session.add(entry1) db.session.commit() return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all()", "id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False, nullable=True) bookingdate=db.Column(db.String(50),", "#for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def", "return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname')", "date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today =", "datetime import datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False", "name=name1.busname date=name1.bookingdate #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin'", "import datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False 
app.config['SESSION_TYPE']='filesystem'", "scoped_session,sessionmaker from base64 import b64encode import base64 from sqlalchemy import func import sqlite3", "seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2)", "rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x)", "@app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return", "if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\")", "BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\")", "* int(seat.ticket_per_seat) if today2 != today: update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date:", "def login(): error = None if request.method == 'POST': if 
request.form['user'] != 'admin'", "== 'POST': if request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>': error = 'Invalid", "Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return", "lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False,", "- today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if", "import datetime today = datetime.date.today() saterday = today + datetime.timedelta((5 - today.weekday() %", "db.session.commit() if date.wednesday=='wednesday': import datetime today = datetime.date.today() wednesday = today + datetime.timedelta((2", "ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city')", "SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit():", "request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat')", "return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, 
type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats')", "login_user #from flask import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import", "#return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html')", "= base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal(): invalid=\"invlid username of password\"", "# n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first()", "unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True)", "request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>': error = 'Invalid Credentials. 
Please try", "7)) print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime", "request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today = datetime.date.today() monday = today", "seat: if seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking))", "redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all()", "Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today = datetime.date.today() saterday = today +", "return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error = None if request.method ==", "LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin import admin_blueprint from", "= request.form.get('ticket_per_seat') 
update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1,", "render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day')", "city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download():", "#return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else:", "render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n)", "booking1(): global person noperson=\"no person selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for", "global z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: 
id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday", "file==\" \": return render_template('name.html' ,error=error) else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html')", "= datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person)", "login(): error = None if request.method == 'POST': if request.form['user'] != 'admin' or", "def image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list,", "\" or EMAIL==\" \" or Month==\" \" or GANDER==\" \" or PASSWORD==\" \"", "#return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html')", "monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import", "% 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import", "= base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def", "return 
render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in", "or Month==\" \" or GANDER==\" \" or PASSWORD==\" \" or file==\" \": return", "in date1: print(date.date, date.id) day=(date.day) monday=print(date.monday) tuesday=print(date.tuesday) wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import", "saterday=print(date.saterday) if date.day=='sunday': import datetime today = datetime.date.today() sunday = today + datetime.timedelta((6", "monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if date.tuesday=='tuesday': import datetime", "\" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date in date1: print(date.date, date.id) day=(date.day)", "import b64encode import base64 from sqlalchemy import func import sqlite3 from sqlalchemy.sql import", "if FIRST==\" \" or LAST==\" \" or EMAIL==\" \" or Month==\" \" or", "date,Busesdata.wednesday == date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1 in seat:", "sqlalchemy import or_ print(\"golu\") #from flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view", "#from app.admin import admin_blueprint from datetime import datetime today=datetime.now print(today) app = Flask(__name__)", "7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday}) db.session.commit() if", "if request.method==\"POST\": #return 
redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0': return", "collection in collection: print('a') #number_trained = db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first())", "global person noperson=\"no person selected \" person=request.form.get('person') city = request.form.get('city') date1=Busesdata.query.filter_by(city=city).all() for date", "Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'}) update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person", "loginreal(): invalid=\"invlid username of password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is", "today + datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday) update5 =", "BytesIO from sqlalchemy.orm import scoped_session,sessionmaker from base64 import b64encode import base64 from sqlalchemy", "sqlite3 from sqlalchemy.sql import text #from flaskblog import #from flaskblog.models import User, Posts", "seat1: print('g') if person==0: x=request.form.get('person') print(x) x=seat.seats-int(person) today2=seat.date print(today2) X = int(person) *", "error = None if request.method == 'POST': if request.form['user'] != 'admin' or request.form['password']", "% 7)) print(friday) entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import", "db.session.commit() if date.monday=='monday': import datetime today = datetime.date.today() monday = today + datetime.timedelta((0", "is None: return 
render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image", "= datetime.date.today() monday = today + datetime.timedelta((0 - today.weekday() % 7)) if monday<today:", "else: return render_template('booking.html', rows=seat,date=date) #(int(float((seat1.seats)))-(float(result1))) #return redirect(url_for(booking)) #print(x) #if x<1: #error=\"NO BUS AVELABLE\"", "- today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if", "def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender')", "= datetime.date.today() wednesday = today + datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1", "== date,Busesdata.thursday == date,Busesdata.friday == date,Busesdata.saterday == date)).all() for seat1 in seat: if", "= LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin import admin_blueprint from datetime import", "unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True)", "import RegistrationForm, LoginForm import os #import login_user #from flask import flask_login #from flask", "@app.route(\"/\") def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname')", "return render_template('index.html',data=list, image=image,username=username) #return 
render_template('/index.html') return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if", "invalid=\"invlid username of password\" if request.method=='POST': username=request.form['username'] password=request.form['password'] ragisted=Posts.query.filter_by(firstname=username,password=password).first() if ragisted is None:", "datetime.date.today() sunday = today + datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1 =", "from base64 import b64encode import base64 from sqlalchemy import func import sqlite3 from", "today = datetime.date.today() tuesday = today + datetime.timedelta((1 - today.weekday() % 7)) print(tuesday)", "User, Posts #from flaskblog.forms import RegistrationForm, LoginForm import os #import login_user #from flask", "ticket_per_seat global city global day global type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type", "+ datetime.timedelta((0 - today.weekday() % 7)) if monday<today: monday=today+datetime.timedelta(7+today.weekday()%7) print(monday) entry1 = Busesdata(date=monday)", "Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime today = datetime.date.today() friday", "sqlalchemy import func import sqlite3 from sqlalchemy.sql import text #from flaskblog import #from", "db.session.commit() error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person')", "render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\"", "date,Busesdata.saterday == date)).all() for 
seat1 in seat: if seat1==None: return render_template('booking.html', rows=seat) else:", "Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit() if date.monday=='monday': import datetime today = datetime.date.today() monday", "updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def download(): file_data=Posts.query.filter_by(ID=1).first() return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True)", "@app.route('/booking', methods=[\"POST\",\"GET\"]) def booking1(): global person noperson=\"no person selected \" person=request.form.get('person') city =", "autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50), unique=False, nullable=True)", "seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download') def", "#number_trained = db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first()", "db.session.commit() if date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday = today + datetime.timedelta((1", "db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today = datetime.date.today() thursday =", "def dataediting(): event = 
Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1)", "datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key", "loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in event2: image =", "nullable=True) ima=db.Column(db.LargeBinary,unique=False, nullable=True) class Busesdata(db.Model): id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False,", "def hello(): return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname')", ",as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for event in", "delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED", "buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all()", "loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', 
methods=[\"POST\",\"GET\"]) def booking1(): global", "viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking)", "today}) #delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate =", "import admin_blueprint from datetime import datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew'", "flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view = 'login' #from app.admin import", "= Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today = datetime.date.today() thursday = today +", "#if x<1: #error=\"NO BUS AVELABLE\" #return render_template('booking.html',error=error) #print(x) #name=seat.busname #date=seat.date #prise=seat.ticket_per_seat #city=seat.city #type=seat.type", "= today + datetime.timedelta((3 - today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday) update5", "if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT =", "#city=seat.city #type=seat.type #return render_template('booking.html', rows=seat) return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50),", "EMAIL==\" \" or Month==\" \" or GANDER==\" \" or PASSWORD==\" \" or file==\"", "date.tuesday=='tuesday': import datetime today = datetime.date.today() tuesday = today + datetime.timedelta((1 - today.weekday()", "= Busesdata(date=thursday) update5 = 
Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday}) db.session.commit() if date.wednesday=='wednesday': import datetime today = datetime.date.today()", "GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or LAST==\" \" or EMAIL==\" \" or", "today = datetime.date.today() sunday = today + datetime.timedelta((6 - today.weekday() % 7)) print(sunday)", "+ datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit() update5 =", "@app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def", "'<PASSWORD>': error = 'Invalid Credentials. Please try again.' else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12)", "= Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday}) db.session.commit() if date.saterday=='saterday': import datetime today = datetime.date.today()", "if date.saterday=='saterday': import datetime today = datetime.date.today() saterday = today + datetime.timedelta((5 -", "7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id)) #busname=request.form['busname']", "busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat')", "thursday = today + datetime.timedelta((3 - 
today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday)", "'login' #from app.admin import admin_blueprint from datetime import datetime today=datetime.now print(today) app =", "seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for buses", "request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT})", "return render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True)", "import os #import login_user #from flask import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required", "DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\") WEDNESDAY=request.form.get(\"wed\") THURSDAY=request.form.get(\"thu\") FRIDAY=request.form.get(\"fri\") SATERDAY=request.form.get(\"sat\") if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None", "import datetime today = datetime.date.today() friday = today + datetime.timedelta((4 - today.weekday() %", "render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b:", "ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": 
BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima']", "flaskblog import #from flaskblog.models import User, Posts #from flaskblog.forms import RegistrationForm, LoginForm import", "FIRST==\" \" or LAST==\" \" or EMAIL==\" \" or Month==\" \" or GANDER==\"", "#@loginreal(): def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return render_template('loginreal.html') @app.route('/booking', methods=[\"POST\",\"GET\"]) def", "= request.form.get('busname') CITY = request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY})", "datetime.date.today() wednesday = today + datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1 =", "SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit() return render_template('busesdata1.html',added=added)", "type busname1=select.busname ticket_per_seat1=select.ticket_per_seat seat1=select.seats city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY", "rows=seat) return render_template('booking.html') class Bookingdata(db.Model): 
id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False,", "datetime today = datetime.date.today() tuesday = today + datetime.timedelta((1 - today.weekday() % 7))", "city1=select.city day1=select.day type1=select.type if request.method==\"POST\": BUSNAME = request.form.get('busname') CITY = request.form.get('city') SEAT =", "nullable=True) password=db.Column(db.String(50), unique=False, nullable=True) month=db.Column(db.String(10), unique=False, nullable=True) day=db.Column(db.String(40),unique=False, nullable=True) gender=db.Column(db.String(4),unique=False, nullable=True) year=db.Column(db.String(10),unique=False, nullable=True)", "unique=False, nullable=True) #day=db.Column(db.String(20),nullable=True) @app.route('/booking', methods=[\"POST\",\"GET\"]) @app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET']) def mybooking1(id,date): sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\")", "7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import datetime", "today = datetime.date.today() thursday = today + datetime.timedelta((3 - today.weekday() % 7)) print(thursday)", "datetime.date.today() saterday = today + datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5 =", "return render_template('index.html') @app.route(\"/name\",methods=['GET','POST']) def home(): if (request.method == 'POST'): FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month')", "today + datetime.timedelta((2 - today.weekday() % 7)) print(wednesday) entry1 = Busesdata(date=wednesday) update5 =", "= 'Invalid Credentials. Please try again.' 
else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html',", "@app.route('/dataediting') def dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit()", "ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date') CITY=request.form.get('city') TYPE=request.form.get('type') file=request.files['ima'] DAY=request.form.get(\"son\") MONDAY=request.form.get(\"mon\") TUESADAY=request.form.get(\"tue\")", "app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db =", "busesdata(): added=\"BUS SUCESSFULLY ADDED\" selectall=\"PLEASE SELECT ALL\" if request.method==\"POST\": BUSNAME=request.form.get('busname') SEATS=request.form.get('seats') TICKECT_PER_SEAT=request.form.get('ticket_per_seat') DATE=request.form.get('date')", "return render_template('loginreal.html') @app.route('/loginpage', methods=['POST','GET']) #@loginreal(): def loginpage(): if request.method==\"POST\": #return render_template('loginreal.html') loginreal() return", "ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated(): BUSNAME=request.form.get('busname') CITY=request.form.get('city') SEAT=request.form.get('seats') DAY=request.form.get('day') TICKER_PER_SEAT=request.form.get('ticket_per_seat') @app.route('/download')", "sum seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= 
Bookingdata.query.filter_by(id=id).first() name=name1.busname date=name1.bookingdate #for", "return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull) return render_template('mybooking.html',X=X,date=date) @app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses:", "import or_ print(\"golu\") #from flask.ext.login import LoginManager #lm = LoginManager() #lm.init_app(app) #lm.login_view =", "city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day == date,Busesdata.monday == date,Busesdata.tuesday", "render_template('busesdata',selectall=selectall) else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index')", "datetime today = datetime.date.today() saterday = today + datetime.timedelta((5 - today.weekday() % 7))", "image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image) @app.route('/loginreal', methods=['POST','GET']) def loginreal():", "= db.session.execute(text(\"select sum seats from Bookingdata where location=id\").first()) #print(sum) #booking=Bookingdata.query.all() name1= Bookingdata.query.filter_by(id=id).first() name=name1.busname", "return render_template('busesdata.html') @app.route('/index') def image(): event = Posts.query.filter_by(firstname='ghar').first() image = b64encode(event.ima) image =", "'POST': if request.form['user'] != 'admin' or request.form['password'] != 
'<PASSWORD>': error = 'Invalid Credentials.", "id=db.Column(db.Integer, primary_key=True, autoincrement=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True) date=db.Column(db.String(50),", "or LAST==\" \" or EMAIL==\" \" or Month==\" \" or GANDER==\" \" or", "print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in collection: print('a') #number_trained = db.session.execute(text(\"select sum", "#busname=request.form['busname'] date=request.form['date'] global z city=request.form.get('city') person=request.form.get('person') if person=='0': return render_template('booking.html',noperson=noperson) else: id=Bookingdata.query.all() seat=Busesdata.query.filter_by(city=city).filter(or_(Busesdata.day", "for seat1 in seat: if seat1==None: return render_template('booking.html', rows=seat) else: return render_template('booking.html', rows=seat,date=date)", "<gh_stars>0 from flask import Flask, render_template,request, url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io", "autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50), unique=False, nullable=True)", "= id).first() for b in b: print(b) collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first() for collection in", "+ datetime.timedelta((3 - today.weekday() % 7)) print(thursday) entry1 = Busesdata(date=thursday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday})", "update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday}) db.session.commit() if date.friday=='friday': import 
datetime today = datetime.date.today() friday =", "#app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting(): event = Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>')", "#image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return render_template('index.html',data=list, image=image,username=username) #return render_template('/index.html') return render_template('loginreal.html')", "+ datetime.timedelta((4 - today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday})", "FIRST=request.form.get('firstname') LAST=request.form.get('lastname') EMAIL=request.form.get('email') Month=request.form.get('month') DAY=request.form.get('day') YEAR=request.form.get('year') GANDER=request.form.get('gender') PASSWORD=request.form.get('password') file=request.files['ima'] if FIRST==\" \" or", "ID=db.Column(db.Integer, primary_key=True, autoincrement=True) firstname=db.Column(db.String(50), unique=False, nullable=True) lastname=db.Column(db.String(50), unique=False, nullable=True) email=db.Column(db.String(50), unique=False, nullable=True) password=db.Column(db.String(50),", "datetime.timedelta((6 - today.weekday() % 7)) print(sunday) entry1 = Busesdata(date=sunday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday}) db.session.commit()", ",buses=buses,booking=booking) @app.route('/schedule2/<string:id>') def viewbookingdata2(id): buses=Busesdata.query.all() b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first() for b in b: print(b)", "file=request.files['ima'] if FIRST==\" \" or LAST==\" \" or EMAIL==\" \" or Month==\" \"", "render_template,request, url_for,redirect,send_file,session,abort from flask_sqlalchemy import SQLAlchemy from io import BytesIO from sqlalchemy.orm 
import", "#lm.init_app(app) #lm.login_view = 'login' #from app.admin import admin_blueprint from datetime import datetime today=datetime.now", "#lm.login_view = 'login' #from app.admin import admin_blueprint from datetime import datetime today=datetime.now print(today)", "today = datetime.date.today() wednesday = today + datetime.timedelta((2 - today.weekday() % 7)) print(wednesday)", "@app.route('/schedule1') def viewbookingdata(): buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return", "% 7)) print(saterday) update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday}) db.session.commit() error=\"select person\" if request.method==\"POST\": #return redirect(url_for('mybooking1',id=id))", "db.session.commit() print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\"", "= today + datetime.timedelta((1 - today.weekday() % 7)) print(tuesday) entry1 = Busesdata(date=tuesday) db.session.commit()", "flask import flask_login #from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required from sqlalchemy import or_ print(\"golu\")", "render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image = b64encode(event.ima) image = base64.b64encode(event.ima).decode('ascii') return", "False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model): ID=db.Column(db.Integer,", "wednesday=print(date.wednesday) friday=print(date.friday) saterday=print(date.saterday) if date.day=='sunday': import datetime today = datetime.date.today() sunday = today", "= datetime.date.today() friday = today + datetime.timedelta((4 - 
today.weekday() % 7)) print(friday) entry1", "db.session.commit() return render_template('busesdata1.html',added=added) return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1) @app.route('/busesdata1',methods=['POST','GET']) def updated():", "sucessfull=\"BOOKED SUCESSFULLY\" today = datetime.now().strftime(\"%Y-%m-%d\") seat1=Busesdata.query.filter_by(id=id).all() for seat in seat1: print('g') if person==0:", "print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first() global", "request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate') update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x}) entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate) db.session.add(entry1)", ",buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error = None if request.method == 'POST': if", "= request.form.get('city') SEAT = request.form.get('seats') TICKET_PER_SEAT = request.form.get('ticket_per_seat') update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT}) update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY}) update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT}) update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT}) db.session.commit()", "render_template('booking.html') class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, 
nullable=True) collection=db.Column(db.String(50),", "datetime.date.today() thursday = today + datetime.timedelta((3 - today.weekday() % 7)) print(thursday) entry1 =", "try again.' else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting') def dataediting():", "= False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app) class Posts(db.Model):", "if ragisted is None: return render_template('loginreal.html',invalid=invalid) else: session['ragisted']=True event = Posts.query.filter_by(firstname=username).first() #image =", "n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection) @app.route('/admin' ,methods=['GET','POST']) def login(): error = None if", "from sqlalchemy import func import sqlite3 from sqlalchemy.sql import text #from flaskblog import", "else: print(MONDAY) entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,friday=FRIDAY,saterday=SATERDAY) db.session.add(entry1) db.session.commit() return render_template('busesdata.html', added=added) return render_template('busesdata.html') @app.route('/index') def", "friday = today + datetime.timedelta((4 - today.weekday() % 7)) print(friday) entry1 = Busesdata(date=friday)", "class Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), unique=False,", "nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) 
day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True) tuesday=db.Column(db.String(20),nullable=True) wednesday=db.Column(db.String(20),nullable=True) thursday=db.Column(db.String(20),nullable=True) friday=db.Column(db.String(20),nullable=True) saterday=db.Column(db.String(20),nullable=True) @app.route(\"/\")", "Busesdata.query.all() return render_template('dataediting.html',event=event) @app.route('/dataediting/<string:id>') def dataediting1(id): delete1=Busesdata.query.filter_by(id=id).delete() db.session.commit() print(delete1) print(id) print('golu') return redirect", "datetime today = datetime.date.today() wednesday = today + datetime.timedelta((2 - today.weekday() % 7))", "Busesdata(date=tuesday) db.session.commit() update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday}) if date.thursday=='thursday': import datetime today = datetime.date.today() thursday", "else: entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today) db.session.add(entry) db.session.commit() return render_template('name.html') return render_template('name.html') @app.route('/busesdata' ,methods=['GET','POST']) def busesdata(): added=\"BUS", "return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True) @app.route(\"/newlogin\") def loginsubmit(): return render_template('newlogin.html') @app.route(\"/schedule\") def schedule(): event2=Busesdata.query.all() for", "from datetime import datetime today=datetime.now print(today) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =", "Bookingdata(db.Model): id=db.Column(db.String(20) ,primary_key=True ,nullable=True) busname=db.Column(db.String(50), unique=False, nullable=True) seats=db.Column(db.String(50), unique=False, nullable=True) collection=db.Column(db.String(50), 
unique=False, nullable=True)", "Credentials. Please try again.' else: session['logged_in']=True return render_template('welcomeadmin.html') #app.secret_key=os.unrandom(12) return render_template('admin.html', error=error) @app.route('/dataediting')", "= datetime.date.today() saterday = today + datetime.timedelta((5 - today.weekday() % 7)) print(saterday) update5", "'mysql://root:@localhost/busservisenew' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SESSION_TYPE']='filesystem' app.secret_key = <KEY>' #login_manager=Login_Manager() print('jhgj') db = SQLAlchemy(app)", "nullable=True) date=db.Column(db.String(50), unique=False, nullable=True) city=db.Column(db.String(50), unique=False, nullable=True) type=db.Column(db.String(10), unique=False, nullable=True) ima=db.Column(db.LargeBinary) day=db.Column(db.String(20),nullable=True) monday=db.Column(db.String(20),nullable=True)", "buses=Busesdata.query.all() booking=Bookingdata.query.all() #for buses in buses: # n=(int(float(buses.totelseats)-int(float(buses.seats)))) #print(n) return render_template('schedule1.html' ,buses=buses,booking=booking) @app.route('/schedule2/<string:id>')", "#delete=Booking.query.filter_by(id=id).delete() db.session.commit() if request.method==\"POST\": seatfill=int(person) money=int(X) PERSON=person BUSNAME=seat.busname TYPE=seat.type ID=seat.id Bookingdate = request.form.get('bookingdate')", "print(delete1) print(id) print('golu') return redirect (url_for('dataediting')) @app.route('/busesdata1/<string:id>',methods=['POST','GET']) def busesdata1(id): added=\"BUS EDITED SUCESSFULLY\" select=Busesdata.query.filter_by(id=id).first()", "import datetime today = datetime.date.today() thursday = today + datetime.timedelta((3 - today.weekday() %" ]
[ "stranding algorithm for sequences mapped to a human genome reference assembly. Mapping coordinates", "pos + max_length + window + 1) except ValueError: raise MissingReferenceFlank( 'Could not", "= 0 FORWARD_STRAND = 1 REVERSE_STRAND = -1 # empirically derived default values", "coordinates. This is the most performant use case as no alignments are performed.", "self.align(ref, query, False) for alignment_tuple in alignments: a1, a2, score, begin, end =", "'MT' chr_name = chr_name if chr_name != 'XY' else 'X' max_length = max(len(_5p),", "as a score of 0 return 0 return alignment def align_and_log(self, ref, query):", "try this first if window == 0: if _3p == ref_3p: return FORWARD_STRAND", "= self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p)", "= chr_name == 'MT' chr_name = chr_name if chr_name != 'XY' else 'X'", "Illumina beadchip. Two points are awarded for each matching base and one #", "the specified coordinates. This is the most performant use case as no alignments", "self.tolerance == 1.0: raise Unstrandable('Strict stranding failed') # alignments are expensive so try", "BLAT or BLAST. Given one or both flanks and genome mapping coordinates it", "a2, score, begin, end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p,", "mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty =", "flank length of %d' % self.min_flank_length) # chromosome-specific conventions loop = chr_name ==", "ref_3p_RC: return REVERSE_STRAND if window == 0 and self.tolerance == 1.0: raise Unstrandable('Strict", "- window - max_length, pos + window) ref_3p = chromosome.sequence(pos + 1 -", "or BLAST. 
Given one or both flanks and genome mapping coordinates it determines", "from Bio.Seq import Seq from seqseek import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment,", "align(self, ref, query, score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms to perform a local", "specified reference assembly. An InconsistentAlignment exception is raised if alignments are accepted on", "raise Unstrandable('Position 0 is unmapped') elif chr_name in ('0', 0): raise Unstrandable('Chromosome 0", "unmapped') elif chr_name in ('0', 0): raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p),", "rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p)", "min_flank_length self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty", "* self.match_score * self.tolerance def is_perfect_score(self, score, query): if len(query) < self.min_flank_length: return", "%d' % self.min_flank_length) # chromosome-specific conventions loop = chr_name == 'MT' chr_name =", "human genome reference assembly. Mapping coordinates are required! This is not BLAT or", "`tolerance` setting defines the minimum alignment score relative to the query sequence length.", "% (chr_name, pos, window)) # exact comparisons are cheap so try this first", "is claimed to be faster and less memory intensive. Otherwise a tuple of", "use case as no alignments are performed. 
Otherwise, the algorithm will load the", "chromosome = Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos - window - max_length, pos", "self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score,", "min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score =", "between the reference and query sequences using the specified (or default) score and", "scored against the query flanks. Alignments scoring above `len(query flank) * match_score *", "end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name,", "len(query) * self.match_score * self.tolerance def is_perfect_score(self, score, query): if len(query) < self.min_flank_length:", "if alignments are accepted on both strands. An Unstrandable exception is raised if", "= self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or", "Mapping coordinates are required! This is not BLAT or BLAST. Given one or", "strand of the specified reference assembly. A return value of -1 indicates that", "exact mapping coordinates to search nearby regions (up to the `window` size specified)", "(a perfect alignment has a score of `len(query flank) * match_score`) A return", "len(query) < self.min_flank_length: return False return score == len(query) * self.match_score def align(self,", "ref_5p = chromosome.sequence(pos - window - max_length, pos + window) ref_3p = chromosome.sequence(pos", "strands (!) 
# The flanks may be too short or the tolerance may", "of 1 indicates that alignments were accepted against the forward reference sequence and", "the minimum alignment score relative to the query sequence length. This is also", "1) except ValueError: raise MissingReferenceFlank( 'Could not find flanks for %s %d %d'", "is not BLAT or BLAST. Given one or both flanks and genome mapping", "less memory intensive. Otherwise a tuple of (align1, align2, score, begin, end) is", "long sequences. The `tolerance` setting defines the minimum alignment score relative to the", "= 1 REVERSE_STRAND = -1 # empirically derived default values from stranding hundreds", "point is subtracted for each mismatch. Gaps are strongly discouraged with a 5", "mapping coordinates to search nearby regions (up to the `window` size specified) but", "Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one", "import warnings from Bio.pairwise2 import align, format_alignment from Bio.Seq import Seq from seqseek", "flank) * match_score * tolerance` are accepted. (a perfect alignment has a score", "longer than the specified' ' minimum flank length of %d' % self.min_flank_length) #", "the 5' and 3' flanks at the specified coordinates extending in each direction", "_5p == ref_3p_RC: return REVERSE_STRAND if window == 0 and self.tolerance == 1.0:", "score == len(query) * self.match_score def align(self, ref, query, score_only=True): \"\"\" Drops to", "sequences try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos - window -", "instructs bioptyhon to only return the integer score. 
This is claimed to be", "(MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0", "logging import warnings from Bio.pairwise2 import align, format_alignment from Bio.Seq import Seq from", "score, begin, end) is returned. \"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty,", "and 3' flanks at the specified coordinates extending in each direction extended by", "are aligned and scored against the query flanks. Alignments scoring above `len(query flank)", "= tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length", "only check for exact sequence matches at the specified coordinates. This is the", "end) is returned. \"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only)", "score > len(query) * self.match_score * self.tolerance def is_perfect_score(self, score, query): if len(query)", "indicates that alignments were accepted against the forward reference sequence and the flanks", "complement of the forward reference sequence and the flanks correspond to the \"reverse\"", "query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is", "forward reference sequence and the flanks correspond to the \"reverse\" or \"minus\" strand", "chr_name != 'XY' else 'X' max_length = max(len(_5p), len(_3p)) # reference sequences try:", "claimed to be faster and less memory intensive. 
Otherwise a tuple of (align1,", "_3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: # Alignments were accepted on", "self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if", "first if window == 0: if _3p == ref_3p: return FORWARD_STRAND if _5p", "the algorithm will only check for exact sequence matches at the specified coordinates.", "the forward reference sequence and the flanks are on the forward strand of", "self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif is_rev:", "# The flanks may be too short or the tolerance may be too", "(align1, align2, score, begin, end) is returned. \"\"\" alignment = align.localms(ref, query, self.match_score,", "== ref_3p: return FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement())", "# exact comparisons are cheap so try this first if window == 0:", "align2, score, begin, end) is returned. \"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty,", "strand of the specified reference assembly. It can optionally look beyond exact mapping", "on long sequences. The `tolerance` setting defines the minimum alignment score relative to", "reverse complement of the forward reference sequence and the flanks correspond to the", "= self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if", "_5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif", "penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77", "sequence length. 
This is also impacted by changes to the alignment scoring parameters.", "query flanks. Alignments scoring above `len(query flank) * match_score * tolerance` are accepted.", "raised if alignments are accepted on both strands. An Unstrandable exception is raised", "parameters. When `tolerance` is 1.0 and `window` is 0.0 the algorithm will only", "def is_high_scoring(self, score, query): if len(query) < self.min_flank_length: return False return score >", "alignments were accepted against the forward reference sequence and the flanks are on", "find flanks for %s %d %d' % (chr_name, pos, window)) # exact comparisons", "for each mismatch. Gaps are strongly discouraged with a 5 point # penalty.", "biopython's pairwise2.align.localms to perform a local alignment between the reference and query sequences", "align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not alignment: #", "alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION):", "FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC,", "This is the most performant use case as no alignments are performed. Otherwise,", "points are awarded for each matching base and one # point is subtracted", "return REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND if window == 0 and", "chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding algorithm for sequences mapped", "were accepted against the forward reference sequence and the flanks are on the", "# Alignments were accepted on both strands (!) # The flanks may be", "discouraged with a 5 point # penalty. 
DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1", "not alignment: # when biopython doesn't find any alignments in score_only mode it", "window) ref_3p = chromosome.sequence(pos + 1 - window, pos + max_length + window", "== len(query) * self.match_score def align(self, ref, query, score_only=True): \"\"\" Drops to biopython's", "reference and query sequences using the specified (or default) score and penalty values.", "return FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC =", "_5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p)", "the alignment scoring parameters. When `tolerance` is 1.0 and `window` is 0.0 the", "str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p == ref_5p_RC: return", "may be too short or the tolerance may be too loose. LOGGER.error('Forward alignments')", "are accepted. (a perfect alignment has a score of `len(query flank) * match_score`)", "('0', 0): raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise", "any alignments in score_only mode it returns # an empty list which we", "`len(query flank) * match_score`) A return value of 1 indicates that alignments were", "InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND", "self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to", "thousands of flanks # from an Illumina beadchip. Two points are awarded for", "and genome mapping coordinates it determines if the flanking sequence(s) correspond to the", "at the specified coordinates extending in each direction extended by `window`. 
These sequences", "return False return score > len(query) * self.match_score * self.tolerance def is_perfect_score(self, score,", "`window` size specified) but takes longer as local alignments are expensive on long", "scoring above `len(query flank) * match_score * tolerance` are accepted. (a perfect alignment", "align, format_alignment from Bio.Seq import Seq from seqseek import Chromosome from .exceptions import", "self.min_flank_length) # chromosome-specific conventions loop = chr_name == 'MT' chr_name = chr_name if", "are accepted on both strands. An Unstrandable exception is raised if no alignments", "= align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not alignment:", "logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND = -1", "self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not alignment: # when biopython", "or the tolerance may be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p)", "to the `window` size specified) but takes longer as local alignments are expensive", "from an Illumina beadchip. 
Two points are awarded for each matching base and", "len(query) * self.match_score def align(self, ref, query, score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms", "when biopython doesn't find any alignments in score_only mode it returns # an", "one or both flanks and genome mapping coordinates it determines if the flanking", "so try this first if window == 0: if _3p == ref_3p: return", "% self.min_flank_length) # chromosome-specific conventions loop = chr_name == 'MT' chr_name = chr_name", "* self.tolerance def is_perfect_score(self, score, query): if len(query) < self.min_flank_length: return False return", "0 FORWARD_STRAND = 1 REVERSE_STRAND = -1 # empirically derived default values from", "+ window) ref_3p = chromosome.sequence(pos + 1 - window, pos + max_length +", "format_alignment from Bio.Seq import Seq from seqseek import Chromosome from .exceptions import (MissingReferenceFlank,", "is also impacted by changes to the alignment scoring parameters. When `tolerance` is", "REVERSE_STRAND = -1 # empirically derived default values from stranding hundreds of thousands", "short or the tolerance may be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p,", "flank lengths may lead to inaccurate alignments') def is_high_scoring(self, score, query): if len(query)", "begin, end) is returned. \"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty,", "required! This is not BLAT or BLAST. Given one or both flanks and", "import align, format_alignment from Bio.Seq import Seq from seqseek import Chromosome from .exceptions", "reference assembly. A return value of -1 indicates that alignments were accepted against", "of (align1, align2, score, begin, end) is returned. \"\"\" alignment = align.localms(ref, query,", "defines the minimum alignment score relative to the query sequence length. 
This is", "for sequences mapped to a human genome reference assembly. Mapping coordinates are required!", "the `window` size specified) but takes longer as local alignments are expensive on", "strand of the specified reference assembly. An InconsistentAlignment exception is raised if alignments", "performant use case as no alignments are performed. Otherwise, the algorithm will load", "GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance =", "This is also impacted by changes to the alignment scoring parameters. When `tolerance`", "determines if the flanking sequence(s) correspond to the forward or reverse strand of", "len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one flank must be longer than the", "strongly discouraged with a 5 point # penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY =", "is_rev: # Alignments were accepted on both strands (!) # The flanks may", "Seq from seqseek import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER", "assembly. 
An InconsistentAlignment exception is raised if alignments are accepted on both strands.", "# sanity checks if pos == 0: raise Unstrandable('Position 0 is unmapped') elif", "0.0 the algorithm will only check for exact sequence matches at the specified", "1 REVERSE_STRAND = -1 # empirically derived default values from stranding hundreds of", "== 0: raise Unstrandable('Position 0 is unmapped') elif chr_name in ('0', 0): raise", "be longer than the specified' ' minimum flank length of %d' % self.min_flank_length)", "build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding algorithm for sequences", "least one flank must be longer than the specified' ' minimum flank length", "elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one flank must be longer", "only return the integer score. This is claimed to be faster and less", "alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif", "reference assembly. It can optionally look beyond exact mapping coordinates to search nearby", "self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not alignment: # when biopython doesn't", "# from an Illumina beadchip. Two points are awarded for each matching base", "5 point # penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5", "doesn't find any alignments in score_only mode it returns # an empty list", "BLAST. 
Given one or both flanks and genome mapping coordinates it determines if", "== ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND if window ==", "tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score = match_score", "perform a local alignment between the reference and query sequences using the specified", "will load the reference sequences for the 5' and 3' flanks at the", "self.mismatch_penalty, score_only=score_only) if score_only and not alignment: # when biopython doesn't find any", "-1 indicates that alignments were accepted against the reverse complement of the forward", "with a 5 point # penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY", "FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1", "flank must be longer than the specified' ' minimum flank length of %d'", "query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not alignment: # when", "0 return 0 return alignment def align_and_log(self, ref, query): alignments = self.align(ref, query,", "alignments = self.align(ref, query, False) for alignment_tuple in alignments: a1, a2, score, begin,", "faster and less memory intensive. Otherwise a tuple of (align1, align2, score, begin,", "no alignments are accepted. \"\"\" # sanity checks if pos == 0: raise", "flanks at the specified coordinates extending in each direction extended by `window`. 
These", "loop=loop) ref_5p = chromosome.sequence(pos - window - max_length, pos + window) ref_3p =", "must be longer than the specified' ' minimum flank length of %d' %", "pos + window) ref_3p = chromosome.sequence(pos + 1 - window, pos + max_length", "REVERSE_STRAND if window == 0 and self.tolerance == 1.0: raise Unstrandable('Strict stranding failed')", "REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or", "try to do as few as possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score,", "do as few as possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return", "is the most performant use case as no alignments are performed. Otherwise, the", "max_length = max(len(_5p), len(_3p)) # reference sequences try: chromosome = Chromosome(chr_name, build, loop=loop)", "self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not alignment: # when biopython doesn't find", "reference assembly. Mapping coordinates are required! This is not BLAT or BLAST. Given", "list which we treat as a score of 0 return 0 return alignment", "subtracted for each mismatch. Gaps are strongly discouraged with a 5 point #", "== 0: if _3p == ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC: return", "integer score. This is claimed to be faster and less memory intensive. Otherwise", "for exact sequence matches at the specified coordinates. 
This is the most performant", "window == 0: if _3p == ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC:", "alignments in score_only mode it returns # an empty list which we treat", "returns # an empty list which we treat as a score of 0", "score_only mode it returns # an empty list which we treat as a", "< self.min_flank_length: return False return score > len(query) * self.match_score * self.tolerance def", "pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding algorithm for sequences mapped to", "self.min_flank_length: raise FlanksTooShort('At least one flank must be longer than the specified' '", "A return value of -1 indicates that alignments were accepted against the reverse", "the forward reference sequence and the flanks correspond to the \"reverse\" or \"minus\"", "local alignment between the reference and query sequences using the specified (or default)", "flanking sequence(s) correspond to the forward or reverse strand of the specified reference", "flanks are on the forward strand of the specified reference assembly. 
A return", "* self.match_score def align(self, ref, query, score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms to", "if window == 0 and self.tolerance == 1.0: raise Unstrandable('Strict stranding failed') #", "\"\"\" Drops to biopython's pairwise2.align.localms to perform a local alignment between the reference", "= -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH,", "_5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score,", "sequence and the flanks correspond to the \"reverse\" or \"minus\" strand of the", "LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND", "if _3p == ref_3p: return FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC", "self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to inaccurate alignments') def is_high_scoring(self,", "_3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score", "score_only=True instructs bioptyhon to only return the integer score. This is claimed to", "match_score * tolerance` are accepted. (a perfect alignment has a score of `len(query", "unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one flank must be", "+ max_length + window + 1) except ValueError: raise MissingReferenceFlank( 'Could not find", "a tuple of (align1, align2, score, begin, end) is returned. 
\"\"\" alignment =", "flanks for %s %d %d' % (chr_name, pos, window)) # exact comparisons are", "It can optionally look beyond exact mapping coordinates to search nearby regions (up", "against the forward reference sequence and the flanks are on the forward strand", "on both strands. An Unstrandable exception is raised if no alignments are accepted.", "return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0: if", "def align(self, ref, query, score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms to perform a", "in alignments: a1, a2, score, begin, end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple))", "alignment between the reference and query sequences using the specified (or default) score", "if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return", "loose. 
LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p)", "__init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score", "from Bio.pairwise2 import align, format_alignment from Bio.Seq import Seq from seqseek import Chromosome", "_5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score", "Unstrandable('Position 0 is unmapped') elif chr_name in ('0', 0): raise Unstrandable('Chromosome 0 is", "mapping coordinates it determines if the flanking sequence(s) correspond to the forward or", "_5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p):", "# reference sequences try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos -", "is_high_scoring(self, score, query): if len(query) < self.min_flank_length: return False return score > len(query)", "Alignments were accepted on both strands (!) 
# The flanks may be too", "begin, end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build,", "= -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY,", "< DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to inaccurate alignments') def is_high_scoring(self, score,", "window)) # exact comparisons are cheap so try this first if window ==", "max_length, pos + window) ref_3p = chromosome.sequence(pos + 1 - window, pos +", "Otherwise a tuple of (align1, align2, score, begin, end) is returned. \"\"\" alignment", "the flanks correspond to the \"reverse\" or \"minus\" strand of the specified reference", "return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score =", "5' and 3' flanks at the specified coordinates extending in each direction extended", "a human genome reference assembly. Mapping coordinates are required! This is not BLAT", "tolerance may be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments')", "_5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd", "An InconsistentAlignment exception is raised if alignments are accepted on both strands. An", "cheap so try this first if window == 0: if _3p == ref_3p:", "as no alignments are performed. 
Otherwise, the algorithm will load the reference sequences", "max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one flank must be longer than", "look beyond exact mapping coordinates to search nearby regions (up to the `window`", "sequences and their reverse complements are aligned and scored against the query flanks.", "of thousands of flanks # from an Illumina beadchip. Two points are awarded", "self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: # Alignments were accepted on both strands", "alignments') elif is_fwd: return FORWARD_STRAND elif is_rev: return REVERSE_STRAND raise Unstrandable('No matching alignments')", "0: if _3p == ref_3p: return FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND", "= alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name, pos,", "flanks. Alignments scoring above `len(query flank) * match_score * tolerance` are accepted. (a", "alignment scoring parameters. When `tolerance` is 1.0 and `window` is 0.0 the algorithm", "= str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p == ref_5p_RC:", "if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return", "beyond exact mapping coordinates to search nearby regions (up to the `window` size", "stranding failed') # alignments are expensive so try to do as few as", "Given one or both flanks and genome mapping coordinates it determines if the", "self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: # Alignments were accepted", "the most performant use case as no alignments are performed. 
Otherwise, the algorithm", "window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding algorithm for sequences mapped to a", "specified' ' minimum flank length of %d' % self.min_flank_length) # chromosome-specific conventions loop", "mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead", "to the \"reverse\" or \"minus\" strand of the specified reference assembly. An InconsistentAlignment", "as local alignments are expensive on long sequences. The `tolerance` setting defines the", "1.0 and `window` is 0.0 the algorithm will only check for exact sequence", "_5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif is_rev: return REVERSE_STRAND raise", "class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance", "1.0: raise Unstrandable('Strict stranding failed') # alignments are expensive so try to do", "accepted on both strands. An Unstrandable exception is raised if no alignments are", "specified coordinates extending in each direction extended by `window`. These sequences and their", "window == 0: if _3p == ref_3p: return FORWARD_STRAND if _5p == ref_5p:", "= match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short", "most performant use case as no alignments are performed. Otherwise, the algorithm will", "reverse strand of the specified reference assembly. It can optionally look beyond exact", "no alignments are performed. 
Otherwise, the algorithm will load the reference sequences for", "raise FlanksTooShort('At least one flank must be longer than the specified' ' minimum", "# an empty list which we treat as a score of 0 return", "self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths", "accepted. \"\"\" # sanity checks if pos == 0: raise Unstrandable('Position 0 is", "in ('0', 0): raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length:", "Two points are awarded for each matching base and one # point is", "Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH =", "if len(query) < self.min_flank_length: return False return score == len(query) * self.match_score def", "to a human genome reference assembly. Mapping coordinates are required! This is not", "pairwise2.align.localms to perform a local alignment between the reference and query sequences using", "if window == 0: if _3p == ref_5p_RC: return REVERSE_STRAND if _5p ==", "self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd:", "if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\"", "* match_score`) A return value of 1 indicates that alignments were accepted against", "on both strands (!) # The flanks may be too short or the", "chr_name == 'MT' chr_name = chr_name if chr_name != 'XY' else 'X' max_length", "the specified coordinates extending in each direction extended by `window`. These sequences and", "query sequence length. 
This is also impacted by changes to the alignment scoring", "if is_fwd and is_rev: # Alignments were accepted on both strands (!) #", "self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score,", "chromosome.sequence(pos + 1 - window, pos + max_length + window + 1) except", "failed') # alignments are expensive so try to do as few as possible", "flanks correspond to the \"reverse\" or \"minus\" strand of the specified reference assembly.", "== 0: if _3p == ref_3p: return FORWARD_STRAND if _5p == ref_5p: return", "if chr_name != 'XY' else 'X' max_length = max(len(_5p), len(_3p)) # reference sequences", "False) for alignment_tuple in alignments: a1, a2, score, begin, end = alignment_tuple if", "elif chr_name in ('0', 0): raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p))", "gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty", "FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC,", "the reference sequences for the 5' and 3' flanks at the specified coordinates", "def is_perfect_score(self, score, query): if len(query) < self.min_flank_length: return False return score ==", "query): alignments = self.align(ref, query, False) for alignment_tuple in alignments: a1, a2, score,", "of 0 return 0 return alignment def align_and_log(self, ref, query): alignments = self.align(ref,", "0 is unmapped') elif chr_name in ('0', 0): raise Unstrandable('Chromosome 0 is unmapped')", "are strongly discouraged with a 5 point # penalty. 
DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY", "= -1 # empirically derived default values from stranding hundreds of thousands of", "DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class", "query, False) for alignment_tuple in alignments: a1, a2, score, begin, end = alignment_tuple", "The `tolerance` setting defines the minimum alignment score relative to the query sequence", "DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self,", "pos == 0: raise Unstrandable('Position 0 is unmapped') elif chr_name in ('0', 0):", "alignments') def is_high_scoring(self, score, query): if len(query) < self.min_flank_length: return False return score", "algorithm will only check for exact sequence matches at the specified coordinates. This", "a score of `len(query flank) * match_score`) A return value of 1 indicates", "exact sequence matches at the specified coordinates. This is the most performant use", "if _5p == ref_3p_RC: return REVERSE_STRAND if window == 0 and self.tolerance ==", "to inaccurate alignments') def is_high_scoring(self, score, query): if len(query) < self.min_flank_length: return False", "These sequences and their reverse complements are aligned and scored against the query", "be too short or the tolerance may be too loose. 
LOGGER.error('Forward alignments') self.align_and_log(ref_5p,", "Bio.Seq import Seq from seqseek import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable,", "warnings from Bio.pairwise2 import align, format_alignment from Bio.Seq import Seq from seqseek import", "lengths may lead to inaccurate alignments') def is_high_scoring(self, score, query): if len(query) <", "import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION =", "is 0.0 the algorithm will only check for exact sequence matches at the", "assembly. It can optionally look beyond exact mapping coordinates to search nearby regions", "score relative to the query sequence length. This is also impacted by changes", "accepted against the reverse complement of the forward reference sequence and the flanks", "the flanks are on the forward strand of the specified reference assembly. A", "(!) # The flanks may be too short or the tolerance may be", "specified) but takes longer as local alignments are expensive on long sequences. The", "on the forward strand of the specified reference assembly. A return value of", "ref, query): alignments = self.align(ref, query, False) for alignment_tuple in alignments: a1, a2,", "ref_3p = chromosome.sequence(pos + 1 - window, pos + max_length + window +", "as possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score =", "size specified) but takes longer as local alignments are expensive on long sequences.", "the tolerance may be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse", "specified (or default) score and penalty values. 
score_only=True instructs bioptyhon to only return", "length of %d' % self.min_flank_length) # chromosome-specific conventions loop = chr_name == 'MT'", "in each direction extended by `window`. These sequences and their reverse complements are", "to be faster and less memory intensive. Otherwise a tuple of (align1, align2,", "so try to do as few as possible fwd_5p_score = self.align(ref_5p, _5p) if", "extending in each direction extended by `window`. These sequences and their reverse complements", "one # point is subtracted for each mismatch. Gaps are strongly discouraged with", "genome mapping coordinates it determines if the flanking sequence(s) correspond to the forward", "alignments are accepted. \"\"\" # sanity checks if pos == 0: raise Unstrandable('Position", "to the alignment scoring parameters. When `tolerance` is 1.0 and `window` is 0.0", "and query sequences using the specified (or default) score and penalty values. score_only=True", "seqseek import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__)", "return the integer score. This is claimed to be faster and less memory", "coordinates are required! This is not BLAT or BLAST. 
Given one or both", "DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length", "regions (up to the `window` size specified) but takes longer as local alignments", "accepted against the forward reference sequence and the flanks are on the forward", "DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND = -1 # empirically derived default", "ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p == ref_5p_RC: return REVERSE_STRAND", "query): if len(query) < self.min_flank_length: return False return score == len(query) * self.match_score", "it returns # an empty list which we treat as a score of", "FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p", "accepted. (a perfect alignment has a score of `len(query flank) * match_score`) A", "window - max_length, pos + window) ref_3p = chromosome.sequence(pos + 1 - window,", "that alignments were accepted against the forward reference sequence and the flanks are", "1 - window, pos + max_length + window + 1) except ValueError: raise", "1 indicates that alignments were accepted against the forward reference sequence and the", "+ 1 - window, pos + max_length + window + 1) except ValueError:", "# empirically derived default values from stranding hundreds of thousands of flanks #", "False return score > len(query) * self.match_score * self.tolerance def is_perfect_score(self, score, query):", "InconsistentAlignment exception is raised if alignments are accepted on both strands. An Unstrandable", "one flank must be longer than the specified' ' minimum flank length of", "the specified reference assembly. It can optionally look beyond exact mapping coordinates to", "score and penalty values. 
score_only=True instructs bioptyhon to only return the integer score.", "are expensive so try to do as few as possible fwd_5p_score = self.align(ref_5p,", "An Unstrandable exception is raised if no alignments are accepted. \"\"\" # sanity", "_3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd", "the query sequence length. This is also impacted by changes to the alignment", "coordinates extending in each direction extended by `window`. These sequences and their reverse", "chr_name in ('0', 0): raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p)) <", "== 'MT' chr_name = chr_name if chr_name != 'XY' else 'X' max_length =", "sequence and the flanks are on the forward strand of the specified reference", "# alignments are expensive so try to do as few as possible fwd_5p_score", "biopython doesn't find any alignments in score_only mode it returns # an empty", "= logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND =", "REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score,", "return score == len(query) * self.match_score def align(self, ref, query, score_only=True): \"\"\" Drops", "self.min_flank_length: return False return score == len(query) * self.match_score def align(self, ref, query,", "bioptyhon to only return the integer score. This is claimed to be faster", "A return value of 1 indicates that alignments were accepted against the forward", "are required! This is not BLAT or BLAST. Given one or both flanks", "by `window`. 
These sequences and their reverse complements are aligned and scored against", "return 0 return alignment def align_and_log(self, ref, query): alignments = self.align(ref, query, False)", "reference sequence and the flanks correspond to the \"reverse\" or \"minus\" strand of", "0 and self.tolerance == 1.0: raise Unstrandable('Strict stranding failed') # alignments are expensive", "This is claimed to be faster and less memory intensive. Otherwise a tuple", "_3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p):", "DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND = -1 #", "if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev", "The flanks may be too short or the tolerance may be too loose.", "'X' max_length = max(len(_5p), len(_3p)) # reference sequences try: chromosome = Chromosome(chr_name, build,", "it determines if the flanking sequence(s) correspond to the forward or reverse strand", "window, pos + max_length + window + 1) except ValueError: raise MissingReferenceFlank( 'Could", "< self.min_flank_length: raise FlanksTooShort('At least one flank must be longer than the specified'", "longer as local alignments are expensive on long sequences. The `tolerance` setting defines", "reverse complements are aligned and scored against the query flanks. Alignments scoring above", "both strands. An Unstrandable exception is raised if no alignments are accepted. 
\"\"\"", "nearby regions (up to the `window` size specified) but takes longer as local", "max_length + window + 1) except ValueError: raise MissingReferenceFlank( 'Could not find flanks", "from seqseek import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER =", "the specified reference assembly. An InconsistentAlignment exception is raised if alignments are accepted", "we treat as a score of 0 return 0 return alignment def align_and_log(self,", "the query flanks. Alignments scoring above `len(query flank) * match_score * tolerance` are", "Gaps are strongly discouraged with a 5 point # penalty. DEFAULT_MATCH_SCORE = 2", "fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p)", "is_fwd and is_rev: # Alignments were accepted on both strands (!) # The", "Drops to biopython's pairwise2.align.localms to perform a local alignment between the reference and", "also impacted by changes to the alignment scoring parameters. When `tolerance` is 1.0", "* tolerance` are accepted. (a perfect alignment has a score of `len(query flank)", "stranding hundreds of thousands of flanks # from an Illumina beadchip. Two points", "2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def", "str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p == ref_5p_RC: return REVERSE_STRAND if _5p", "and one # point is subtracted for each mismatch. Gaps are strongly discouraged", "-5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY):", "the specified reference assembly. 
A return value of -1 indicates that alignments were", "can optionally look beyond exact mapping coordinates to search nearby regions (up to", "search nearby regions (up to the `window` size specified) but takes longer as", "< self.min_flank_length: return False return score == len(query) * self.match_score def align(self, ref,", "'Could not find flanks for %s %d %d' % (chr_name, pos, window)) #", "at the specified coordinates. This is the most performant use case as no", "# chromosome-specific conventions loop = chr_name == 'MT' chr_name = chr_name if chr_name", "against the reverse complement of the forward reference sequence and the flanks correspond", "- window, pos + max_length + window + 1) except ValueError: raise MissingReferenceFlank(", "of flanks # from an Illumina beadchip. Two points are awarded for each", "match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank", "if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to inaccurate alignments') def", "%s %d %d' % (chr_name, pos, window)) # exact comparisons are cheap so", "sequences. The `tolerance` setting defines the minimum alignment score relative to the query", "if _3p == ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND if", "both strands (!) # The flanks may be too short or the tolerance", "treat as a score of 0 return 0 return alignment def align_and_log(self, ref,", "3' flanks at the specified coordinates extending in each direction extended by `window`.", "reference assembly. An InconsistentAlignment exception is raised if alignments are accepted on both", "are performed. Otherwise, the algorithm will load the reference sequences for the 5'", "exception is raised if alignments are accepted on both strands. An Unstrandable exception", "Unstrandable exception is raised if no alignments are accepted. 
\"\"\" # sanity checks", "_3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: #", "ref, query, score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms to perform a local alignment", "sequence matches at the specified coordinates. This is the most performant use case", "ValueError: raise MissingReferenceFlank( 'Could not find flanks for %s %d %d' % (chr_name,", "DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE,", "raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least", "# penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE =", "== ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window ==", "and self.tolerance == 1.0: raise Unstrandable('Strict stranding failed') # alignments are expensive so", "FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement())", "forward or reverse strand of the specified reference assembly. It can optionally look", "<reponame>23andMe/stranding import logging import warnings from Bio.pairwise2 import align, format_alignment from Bio.Seq import", "impacted by changes to the alignment scoring parameters. When `tolerance` is 1.0 and", "score of `len(query flank) * match_score`) A return value of 1 indicates that", "coordinates it determines if the flanking sequence(s) correspond to the forward or reverse", "alignment score relative to the query sequence length. This is also impacted by", "hundreds of thousands of flanks # from an Illumina beadchip. 
Two points are", "value of 1 indicates that alignments were accepted against the forward reference sequence", "too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC,", "InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif is_rev: return REVERSE_STRAND raise Unstrandable('No matching", "using the specified (or default) score and penalty values. score_only=True instructs bioptyhon to", "specified reference assembly. A return value of -1 indicates that alignments were accepted", "if the flanking sequence(s) correspond to the forward or reverse strand of the", "LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a", "REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND if window == 0 and self.tolerance", "self.tolerance def is_perfect_score(self, score, query): if len(query) < self.min_flank_length: return False return score", "import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH", "= chromosome.sequence(pos - window - max_length, pos + window) ref_3p = chromosome.sequence(pos +", "alignment def align_and_log(self, ref, query): alignments = self.align(ref, query, False) for alignment_tuple in", "chr_name = chr_name if chr_name != 'XY' else 'X' max_length = max(len(_5p), len(_3p))", "aligned and scored against the query flanks. Alignments scoring above `len(query flank) *", "exception is raised if no alignments are accepted. \"\"\" # sanity checks if", "This is a flank stranding algorithm for sequences mapped to a human genome", "# point is subtracted for each mismatch. 
Gaps are strongly discouraged with a", "both flanks and genome mapping coordinates it determines if the flanking sequence(s) correspond", "by changes to the alignment scoring parameters. When `tolerance` is 1.0 and `window`", "warnings.warn('Short flank lengths may lead to inaccurate alignments') def is_high_scoring(self, score, query): if", "to search nearby regions (up to the `window` size specified) but takes longer", "'XY' else 'X' max_length = max(len(_5p), len(_3p)) # reference sequences try: chromosome =", "mismatch. Gaps are strongly discouraged with a 5 point # penalty. DEFAULT_MATCH_SCORE =", "self.min_flank_length: return False return score > len(query) * self.match_score * self.tolerance def is_perfect_score(self,", ".exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION", "to the query sequence length. This is also impacted by changes to the", "\"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and", "strands. An Unstrandable exception is raised if no alignments are accepted. \"\"\" #", "the specified' ' minimum flank length of %d' % self.min_flank_length) # chromosome-specific conventions", "length. This is also impacted by changes to the alignment scoring parameters. When", "and penalty values. score_only=True instructs bioptyhon to only return the integer score. This", "alignments are expensive so try to do as few as possible fwd_5p_score =", "== ref_3p_RC: return REVERSE_STRAND if window == 0 and self.tolerance == 1.0: raise", "is raised if alignments are accepted on both strands. 
An Unstrandable exception is", "mode it returns # an empty list which we treat as a score", "alignment has a score of `len(query flank) * match_score`) A return value of", "sequences using the specified (or default) score and penalty values. score_only=True instructs bioptyhon", "correspond to the \"reverse\" or \"minus\" strand of the specified reference assembly. An", "intensive. Otherwise a tuple of (align1, align2, score, begin, end) is returned. \"\"\"", "is raised if no alignments are accepted. \"\"\" # sanity checks if pos", "= self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if", "alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only and not", "each direction extended by `window`. These sequences and their reverse complements are aligned", "from stranding hundreds of thousands of flanks # from an Illumina beadchip. Two", "match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty", "are expensive on long sequences. The `tolerance` setting defines the minimum alignment score", "ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND if window == 0", "too short or the tolerance may be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p)", "each matching base and one # point is subtracted for each mismatch. Gaps", "base and one # point is subtracted for each mismatch. 
Gaps are strongly", "may lead to inaccurate alignments') def is_high_scoring(self, score, query): if len(query) < self.min_flank_length:", "is a flank stranding algorithm for sequences mapped to a human genome reference", "load the reference sequences for the 5' and 3' flanks at the specified", "loop = chr_name == 'MT' chr_name = chr_name if chr_name != 'XY' else", "= 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND = -1 # empirically", "tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length <", "of the specified reference assembly. A return value of -1 indicates that alignments", "default values from stranding hundreds of thousands of flanks # from an Illumina", "query): if len(query) < self.min_flank_length: return False return score > len(query) * self.match_score", "pos, window)) # exact comparisons are cheap so try this first if window", "== 0 and self.tolerance == 1.0: raise Unstrandable('Strict stranding failed') # alignments are", "sequences for the 5' and 3' flanks at the specified coordinates extending in", "the reference and query sequences using the specified (or default) score and penalty", "extended by `window`. These sequences and their reverse complements are aligned and scored", "a flank stranding algorithm for sequences mapped to a human genome reference assembly.", "Otherwise, the algorithm will load the reference sequences for the 5' and 3'", "may be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC,", "= min_flank_length self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty =", "memory intensive. Otherwise a tuple of (align1, align2, score, begin, end) is returned.", "forward strand of the specified reference assembly. 
A return value of -1 indicates", "import Seq from seqseek import Chromosome from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort)", "of -1 indicates that alignments were accepted against the reverse complement of the", "_5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding algorithm", "for each matching base and one # point is subtracted for each mismatch.", "flanks may be too short or the tolerance may be too loose. LOGGER.error('Forward", "\"\"\" This is a flank stranding algorithm for sequences mapped to a human", "lead to inaccurate alignments') def is_high_scoring(self, score, query): if len(query) < self.min_flank_length: return", "for the 5' and 3' flanks at the specified coordinates extending in each", "raise MissingReferenceFlank( 'Could not find flanks for %s %d %d' % (chr_name, pos,", "in score_only mode it returns # an empty list which we treat as", "build, loop=loop) ref_5p = chromosome.sequence(pos - window - max_length, pos + window) ref_3p", "the flanking sequence(s) correspond to the forward or reverse strand of the specified", "except ValueError: raise MissingReferenceFlank( 'Could not find flanks for %s %d %d' %", "FORWARD_STRAND = 1 REVERSE_STRAND = -1 # empirically derived default values from stranding", "ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0:", "return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p)", "reference sequence and the flanks are on the forward strand of the specified", "LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND", "query, score_only=True): \"\"\" Drops to biopython's 
pairwise2.align.localms to perform a local alignment between", "alignments are performed. Otherwise, the algorithm will load the reference sequences for the", "0): raise Unstrandable('Chromosome 0 is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At", "scoring parameters. When `tolerance` is 1.0 and `window` is 0.0 the algorithm will", "of the specified reference assembly. An InconsistentAlignment exception is raised if alignments are", "return False return score == len(query) * self.match_score def align(self, ref, query, score_only=True):", "alignments are accepted on both strands. An Unstrandable exception is raised if no", "0: raise Unstrandable('Position 0 is unmapped') elif chr_name in ('0', 0): raise Unstrandable('Chromosome", "query sequences using the specified (or default) score and penalty values. score_only=True instructs", "ref_3p: return FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC", "awarded for each matching base and one # point is subtracted for each", "check for exact sequence matches at the specified coordinates. This is the most", "are awarded for each matching base and one # point is subtracted for", "-1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE,", "few as possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score", "max(len(_5p), len(_3p)) # reference sequences try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p =", "sequences mapped to a human genome reference assembly. Mapping coordinates are required! This", "the specified (or default) score and penalty values. 
score_only=True instructs bioptyhon to only", "the algorithm will load the reference sequences for the 5' and 3' flanks", "self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score,", "= max(len(_5p), len(_3p)) # reference sequences try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p", "Bio.pairwise2 import align, format_alignment from Bio.Seq import Seq from seqseek import Chromosome from", "raised if no alignments are accepted. \"\"\" # sanity checks if pos ==", "+ window + 1) except ValueError: raise MissingReferenceFlank( 'Could not find flanks for", "is_perfect_score(self, score, query): if len(query) < self.min_flank_length: return False return score == len(query)", "expensive on long sequences. The `tolerance` setting defines the minimum alignment score relative", "and their reverse complements are aligned and scored against the query flanks. Alignments", "against the query flanks. Alignments scoring above `len(query flank) * match_score * tolerance`", "0 is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one flank", "%d' % (chr_name, pos, window)) # exact comparisons are cheap so try this", "self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if", "= 0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length =", "takes longer as local alignments are expensive on long sequences. The `tolerance` setting", "forward reference sequence and the flanks are on the forward strand of the", "alignments are expensive on long sequences. 
The `tolerance` setting defines the minimum alignment", "expensive so try to do as few as possible fwd_5p_score = self.align(ref_5p, _5p)", "alignments: a1, a2, score, begin, end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def", "= mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may", "gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to inaccurate alignments')", "self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev =", "\"\"\" # sanity checks if pos == 0: raise Unstrandable('Position 0 is unmapped')", "_5p == ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window", "default) score and penalty values. score_only=True instructs bioptyhon to only return the integer", "self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND", "specified coordinates. This is the most performant use case as no alignments are", "_3p == ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND if window", "def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank", "alignment: # when biopython doesn't find any alignments in score_only mode it returns", "This is not BLAT or BLAST. Given one or both flanks and genome", "and scored against the query flanks. Alignments scoring above `len(query flank) * match_score", "is unmapped') elif max(len(_5p), len(_3p)) < self.min_flank_length: raise FlanksTooShort('At least one flank must", "mapped to a human genome reference assembly. 
Mapping coordinates are required! This is", "`tolerance` is 1.0 and `window` is 0.0 the algorithm will only check for", "and `window` is 0.0 the algorithm will only check for exact sequence matches", "local alignments are expensive on long sequences. The `tolerance` setting defines the minimum", "0.77 class GenomeStranding(object): def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length", "Unstrandable('Strict stranding failed') # alignments are expensive so try to do as few", "(chr_name, pos, window)) # exact comparisons are cheap so try this first if", "LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise", "derived default values from stranding hundreds of thousands of flanks # from an", "return score > len(query) * self.match_score * self.tolerance def is_perfect_score(self, score, query): if", "or \"minus\" strand of the specified reference assembly. An InconsistentAlignment exception is raised", "_5p) if is_fwd and is_rev: # Alignments were accepted on both strands (!)", "is unmapped') elif chr_name in ('0', 0): raise Unstrandable('Chromosome 0 is unmapped') elif", "raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif is_rev: return REVERSE_STRAND raise Unstrandable('No", "0: if _3p == ref_5p_RC: return REVERSE_STRAND if _5p == ref_3p_RC: return REVERSE_STRAND", "the integer score. This is claimed to be faster and less memory intensive.", "from .exceptions import (MissingReferenceFlank, InconsistentAlignment, Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15", "flanks # from an Illumina beadchip. 
Two points are awarded for each matching", "if no alignments are accepted. \"\"\" # sanity checks if pos == 0:", "a score of 0 return 0 return alignment def align_and_log(self, ref, query): alignments", "a local alignment between the reference and query sequences using the specified (or", "specified reference assembly. It can optionally look beyond exact mapping coordinates to search", "and is_rev: # Alignments were accepted on both strands (!) # The flanks", "which we treat as a score of 0 return 0 return alignment def", "values. score_only=True instructs bioptyhon to only return the integer score. This is claimed", "tolerance` are accepted. (a perfect alignment has a score of `len(query flank) *", "this first if window == 0: if _3p == ref_3p: return FORWARD_STRAND if", "each mismatch. Gaps are strongly discouraged with a 5 point # penalty. DEFAULT_MATCH_SCORE", "= 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE = 0.77 class GenomeStranding(object):", "= gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to inaccurate", "def __init__(self, min_flank_length=DEFAULT_MIN_FLANK_LENGTH, tolerance=DEFAULT_TOLERANCE, match_score=DEFAULT_MATCH_SCORE, mismatch_penalty=DEFAULT_MISMATCH_PENALTY, gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY): self.min_flank_length = min_flank_length self.tolerance = tolerance", "flank stranding algorithm for sequences mapped to a human genome reference assembly. Mapping", "len(query) < self.min_flank_length: return False return score > len(query) * self.match_score * self.tolerance", "= chr_name if chr_name != 'XY' else 'X' max_length = max(len(_5p), len(_3p)) #", "' minimum flank length of %d' % self.min_flank_length) # chromosome-specific conventions loop =", "the forward strand of the specified reference assembly. A return value of -1", "but takes longer as local alignments are expensive on long sequences. The `tolerance`", "returned. 
\"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if score_only", "be faster and less memory intensive. Otherwise a tuple of (align1, align2, score,", "sanity checks if pos == 0: raise Unstrandable('Position 0 is unmapped') elif chr_name", "or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and", "be too loose. LOGGER.error('Forward alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p)", "15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND = 1 REVERSE_STRAND = -1 # empirically derived", "is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: # Alignments", "- max_length, pos + window) ref_3p = chromosome.sequence(pos + 1 - window, pos", "+ 1) except ValueError: raise MissingReferenceFlank( 'Could not find flanks for %s %d", "_3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p):", "changes to the alignment scoring parameters. When `tolerance` is 1.0 and `window` is", "for %s %d %d' % (chr_name, pos, window)) # exact comparisons are cheap", "score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms to perform a local alignment between the", "minimum alignment score relative to the query sequence length. This is also impacted", "the forward or reverse strand of the specified reference assembly. 
It can optionally", "chromosome.sequence(pos - window - max_length, pos + window) ref_3p = chromosome.sequence(pos + 1", "ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p ==", "if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return", "self.match_score * self.tolerance def is_perfect_score(self, score, query): if len(query) < self.min_flank_length: return False", "self.min_flank_length = min_flank_length self.tolerance = tolerance self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty", "of `len(query flank) * match_score`) A return value of 1 indicates that alignments", "MissingReferenceFlank( 'Could not find flanks for %s %d %d' % (chr_name, pos, window))", "self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND", "possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND fwd_3p_score = self.align(ref_3p,", "an Illumina beadchip. Two points are awarded for each matching base and one", "inaccurate alignments') def is_high_scoring(self, score, query): if len(query) < self.min_flank_length: return False return", "matching base and one # point is subtracted for each mismatch. Gaps are", "case as no alignments are performed. 
Otherwise, the algorithm will load the reference", "window == 0 and self.tolerance == 1.0: raise Unstrandable('Strict stranding failed') # alignments", "= chromosome.sequence(pos + 1 - window, pos + max_length + window + 1)", "and not alignment: # when biopython doesn't find any alignments in score_only mode", "(up to the `window` size specified) but takes longer as local alignments are", "fwd_3p_score = self.align(ref_3p, _3p) if self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p)", "`len(query flank) * match_score * tolerance` are accepted. (a perfect alignment has a", "to the forward or reverse strand of the specified reference assembly. It can", "for alignment_tuple in alignments: a1, a2, score, begin, end = alignment_tuple if self.is_high_scoring(score,", "genome reference assembly. Mapping coordinates are required! This is not BLAT or BLAST.", "(or default) score and penalty values. score_only=True instructs bioptyhon to only return the", "return value of 1 indicates that alignments were accepted against the forward reference", "FlanksTooShort('At least one flank must be longer than the specified' ' minimum flank", "and less memory intensive. Otherwise a tuple of (align1, align2, score, begin, end)", "self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score,", "import logging import warnings from Bio.pairwise2 import align, format_alignment from Bio.Seq import Seq", "self.match_score = match_score self.mismatch_penalty = mismatch_penalty self.gap_open_penalty = gap_open_penalty if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH:", "False return score == len(query) * self.match_score def align(self, ref, query, score_only=True): \"\"\"", "to perform a local alignment between the reference and query sequences using the", "is returned. 
\"\"\" alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty, self.gap_open_penalty, self.mismatch_penalty, score_only=score_only) if", "alignments') self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent", "the \"reverse\" or \"minus\" strand of the specified reference assembly. An InconsistentAlignment exception", "_3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return", "and the flanks are on the forward strand of the specified reference assembly.", "score. This is claimed to be faster and less memory intensive. Otherwise a", "reference sequences try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos - window", "self.align_and_log(ref_5p, _5p) self.align_and_log(ref_3p, _3p) LOGGER.error('Reverse alignments') self.align_and_log(ref_5p_RC, _3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments')", "> len(query) * self.match_score * self.tolerance def is_perfect_score(self, score, query): if len(query) <", "optionally look beyond exact mapping coordinates to search nearby regions (up to the", "not find flanks for %s %d %d' % (chr_name, pos, window)) # exact", "complements are aligned and scored against the query flanks. Alignments scoring above `len(query", "were accepted against the reverse complement of the forward reference sequence and the", "were accepted on both strands (!) # The flanks may be too short", "def align_and_log(self, ref, query): alignments = self.align(ref, query, False) for alignment_tuple in alignments:", "find any alignments in score_only mode it returns # an empty list which", "algorithm for sequences mapped to a human genome reference assembly. 
Mapping coordinates are", "will only check for exact sequence matches at the specified coordinates. This is", "conventions loop = chr_name == 'MT' chr_name = chr_name if chr_name != 'XY'", "correspond to the forward or reverse strand of the specified reference assembly. It", "else 'X' max_length = max(len(_5p), len(_3p)) # reference sequences try: chromosome = Chromosome(chr_name,", "# when biopython doesn't find any alignments in score_only mode it returns #", "return alignment def align_and_log(self, ref, query): alignments = self.align(ref, query, False) for alignment_tuple", "values from stranding hundreds of thousands of flanks # from an Illumina beadchip.", "empty list which we treat as a score of 0 return 0 return", "When `tolerance` is 1.0 and `window` is 0.0 the algorithm will only check", "a1, a2, score, begin, end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self,", "flanks and genome mapping coordinates it determines if the flanking sequence(s) correspond to", "than the specified' ' minimum flank length of %d' % self.min_flank_length) # chromosome-specific", "alignment_tuple in alignments: a1, a2, score, begin, end = alignment_tuple if self.is_high_scoring(score, query):", "* match_score * tolerance` are accepted. 
(a perfect alignment has a score of", "coordinates to search nearby regions (up to the `window` size specified) but takes", "comparisons are cheap so try this first if window == 0: if _3p", "the reverse complement of the forward reference sequence and the flanks correspond to", "-1 # empirically derived default values from stranding hundreds of thousands of flanks", "is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score,", "score, query): if len(query) < self.min_flank_length: return False return score > len(query) *", "are on the forward strand of the specified reference assembly. A return value", "setting defines the minimum alignment score relative to the query sequence length. This", "rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd = self.is_high_scoring(fwd_5p_score, _5p)", "`window` is 0.0 the algorithm will only check for exact sequence matches at", "direction extended by `window`. These sequences and their reverse complements are aligned and", "window + 1) except ValueError: raise MissingReferenceFlank( 'Could not find flanks for %s", "their reverse complements are aligned and scored against the query flanks. Alignments scoring", "self.is_high_scoring(fwd_3p_score, _3p) is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev:", "accepted on both strands (!) # The flanks may be too short or", "= self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if", "if pos == 0: raise Unstrandable('Position 0 is unmapped') elif chr_name in ('0',", "\"reverse\" or \"minus\" strand of the specified reference assembly. 
An InconsistentAlignment exception is", "score of 0 return 0 return alignment def align_and_log(self, ref, query): alignments =", "self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif is_rev: return REVERSE_STRAND", "chr_name if chr_name != 'XY' else 'X' max_length = max(len(_5p), len(_3p)) # reference", "0 return alignment def align_and_log(self, ref, query): alignments = self.align(ref, query, False) for", "align_and_log(self, ref, query): alignments = self.align(ref, query, False) for alignment_tuple in alignments: a1,", "checks if pos == 0: raise Unstrandable('Position 0 is unmapped') elif chr_name in", "an empty list which we treat as a score of 0 return 0", "flank) * match_score`) A return value of 1 indicates that alignments were accepted", "relative to the query sequence length. This is also impacted by changes to", "`window`. These sequences and their reverse complements are aligned and scored against the", "try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos - window - max_length,", "if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC = str(Seq(ref_5p).reverse_complement()) ref_3p_RC = str(Seq(ref_3p).reverse_complement()) if", "strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding", "exact comparisons are cheap so try this first if window == 0: if", "matches at the specified coordinates. This is the most performant use case as", "or reverse strand of the specified reference assembly. 
It can optionally look beyond", "match_score`) A return value of 1 indicates that alignments were accepted against the", "_3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This is a flank stranding algorithm for", "algorithm will load the reference sequences for the 5' and 3' flanks at", "= str(Seq(ref_3p).reverse_complement()) if window == 0: if _3p == ref_5p_RC: return REVERSE_STRAND if", "point # penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY = -5 DEFAULT_TOLERANCE", "performed. Otherwise, the algorithm will load the reference sequences for the 5' and", "score_only=score_only) if score_only and not alignment: # when biopython doesn't find any alignments", "alignments were accepted against the reverse complement of the forward reference sequence and", "beadchip. Two points are awarded for each matching base and one # point", "a 5 point # penalty. DEFAULT_MATCH_SCORE = 2 DEFAULT_MISMATCH_PENALTY = -1 DEFAULT_GAP_OPEN_PENALTY =", "as few as possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p): return FORWARD_STRAND", "raise Unstrandable('Strict stranding failed') # alignments are expensive so try to do as", "\"minus\" strand of the specified reference assembly. An InconsistentAlignment exception is raised if", "self.match_score def align(self, ref, query, score_only=True): \"\"\" Drops to biopython's pairwise2.align.localms to perform", "Unstrandable, FlanksTooShort) LOGGER = logging.getLogger(__name__) DEFAULT_MIN_FLANK_LENGTH = 15 DEFAULT_WINDOW_EXTENSION = 0 FORWARD_STRAND =", "to biopython's pairwise2.align.localms to perform a local alignment between the reference and query", "of %d' % self.min_flank_length) # chromosome-specific conventions loop = chr_name == 'MT' chr_name", "to only return the integer score. This is claimed to be faster and", "score_only and not alignment: # when biopython doesn't find any alignments in score_only", "not BLAT or BLAST. 
Given one or both flanks and genome mapping coordinates", "or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: # Alignments were accepted on both", "self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION): \"\"\" This", "of the specified reference assembly. It can optionally look beyond exact mapping coordinates", "or both flanks and genome mapping coordinates it determines if the flanking sequence(s)", "if score_only and not alignment: # when biopython doesn't find any alignments in", "of the forward reference sequence and the flanks correspond to the \"reverse\" or", "return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND rev_3p_score =", "_3p) self.align_and_log(ref_3p_RC, _5p) raise InconsistentAlignment('Inconsistent alignments') elif is_fwd: return FORWARD_STRAND elif is_rev: return", "%d %d' % (chr_name, pos, window)) # exact comparisons are cheap so try", "assembly. 
A return value of -1 indicates that alignments were accepted against the", "that alignments were accepted against the reverse complement of the forward reference sequence", "reference sequences for the 5' and 3' flanks at the specified coordinates extending", "chromosome-specific conventions loop = chr_name == 'MT' chr_name = chr_name if chr_name !=", "perfect alignment has a score of `len(query flank) * match_score`) A return value", "!= 'XY' else 'X' max_length = max(len(_5p), len(_3p)) # reference sequences try: chromosome", "self.is_perfect_score(fwd_3p_score, _3p): return FORWARD_STRAND rev_5p_score = self.align(ref_5p_RC, _3p) if self.is_perfect_score(rev_5p_score, _3p): return REVERSE_STRAND", "Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos - window - max_length, pos + window)", "return REVERSE_STRAND if window == 0 and self.tolerance == 1.0: raise Unstrandable('Strict stranding", "sequence(s) correspond to the forward or reverse strand of the specified reference assembly.", "return REVERSE_STRAND rev_3p_score = self.align(ref_3p_RC, _5p) if self.is_perfect_score(rev_3p_score, _5p): return REVERSE_STRAND is_fwd =", "= self.align(ref, query, False) for alignment_tuple in alignments: a1, a2, score, begin, end", "assembly. Mapping coordinates are required! This is not BLAT or BLAST. Given one", "if len(query) < self.min_flank_length: return False return score > len(query) * self.match_score *", "if window == 0: if _3p == ref_3p: return FORWARD_STRAND if _5p ==", "is subtracted for each mismatch. Gaps are strongly discouraged with a 5 point", "above `len(query flank) * match_score * tolerance` are accepted. 
(a perfect alignment has", "score, begin, end = alignment_tuple if self.is_high_scoring(score, query): LOGGER.error(format_alignment(*alignment_tuple)) def strand_flanks(self, _5p, _3p,", "and the flanks correspond to the \"reverse\" or \"minus\" strand of the specified", "= self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p) if is_fwd and is_rev: # Alignments were", "value of -1 indicates that alignments were accepted against the reverse complement of", "Alignments scoring above `len(query flank) * match_score * tolerance` are accepted. (a perfect", "has a score of `len(query flank) * match_score`) A return value of 1", "len(_3p)) # reference sequences try: chromosome = Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos", "_3p == ref_3p: return FORWARD_STRAND if _5p == ref_5p: return FORWARD_STRAND ref_5p_RC =", "empirically derived default values from stranding hundreds of thousands of flanks # from", "DEFAULT_MIN_FLANK_LENGTH: warnings.warn('Short flank lengths may lead to inaccurate alignments') def is_high_scoring(self, score, query):", "tuple of (align1, align2, score, begin, end) is returned. \"\"\" alignment = align.localms(ref,", "to do as few as possible fwd_5p_score = self.align(ref_5p, _5p) if self.is_perfect_score(fwd_5p_score, _5p):", "minimum flank length of %d' % self.min_flank_length) # chromosome-specific conventions loop = chr_name", "return value of -1 indicates that alignments were accepted against the reverse complement", "is 1.0 and `window` is 0.0 the algorithm will only check for exact", "are accepted. \"\"\" # sanity checks if pos == 0: raise Unstrandable('Position 0", "= Chromosome(chr_name, build, loop=loop) ref_5p = chromosome.sequence(pos - window - max_length, pos +", "are cheap so try this first if window == 0: if _3p ==", "indicates that alignments were accepted against the reverse complement of the forward reference", "penalty values. 
score_only=True instructs bioptyhon to only return the integer score. This is", "score, query): if len(query) < self.min_flank_length: return False return score == len(query) *", "== 1.0: raise Unstrandable('Strict stranding failed') # alignments are expensive so try to" ]
[ "None def load_logs(dict_name): ''' Load best hyperparameters dictionary from \"logs\" directory [dict_name] :", "data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge", "bool, verbosity level ''' # read as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR,", "print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions'", "log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are not available. '", "verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose]", "merged_embeddings # _____________ Logging related functions _____________ def convert(o): if isinstance(o, np.int64): return", "level ''' # load datasets intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged =", "# load corresponding embeddings if embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif", "raise ValueError(\"embeddings argument can be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings =", "os.path import join from os.path import abspath import json import pandas as pd", "= data[[i in cf.questions_relevant_categories for i in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape))", "dict_name): ''' Save best hyperparameters dictionary to \"logs\" directory [logs_dict] : dict [dict_name]", "directory [logs_dict] : dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert)", "read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load 
corresponding embeddings if", "Yield successive n-sized chunks from list [lst] : python list [n] : int", "lang in ['en', 'es', 'fr']: for ds in ['train', 'test', 'eval']: path =", "Save best hyperparameters dictionary to \"logs\" directory [logs_dict] : dict [dict_name] : str", "(cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can be 'bert' or 'labse'\") print(f'{embeddings} embeddings", "'Questions' datasets [embeddings] : str, type of embeddings to load ('bert' or 'labse')", "and rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories for", "'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data", "dataframe data = [] for lang in ['en', 'es', 'fr']: for ds in", "and subclass, keep only class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] =", "as a pandas dataframe data = [] for lang in ['en', 'es', 'fr']:", "read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load corresponding embeddings if embeddings == 'labse':", "''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are not available.", "merged, merged_embeddings # _____________ Logging related functions _____________ def convert(o): if isinstance(o, np.int64):", "cat in ['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data", "1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories", "''' [filename] : str ''' return os.path.isfile(filename) def chunks(lst, n): ''' Yield successive", "questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full data 
shape={}'.format(merged.shape)) return merged, merged_embeddings", "full ' 'training.') with open() as logs_json: logs = json.load(logs_json) print('Best hyperparameters loaded...')", "embeddings to load ('bert' or 'labse') [verbose] : bool, verbosity level ''' #", "'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text',", "''' # load datasets intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged = pd.concat([intent,", "emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full data", "data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data = data[[i in", "argument can be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0])))", "return merged, merged_embeddings # _____________ Logging related functions _____________ def convert(o): if isinstance(o,", "if not is_available(log_path): raise ValueError('Hyperparameters are not available. ' 'Please run train.py in", "str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return None def", "= data[~data['class'].isna()] # remove unannotated rows # split label into class and subclass,", "are not available. 
' 'Please run train.py in \"hyper_opt\" mode before full '", "str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are not", "mode before full ' 'training.') with open() as logs_json: logs = json.load(logs_json) print('Best", "def save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary to \"logs\" directory [logs_dict] :", "inplace=True) data = data[~data['class'].isna()] # remove unannotated rows # split label into class", "pd import numpy as np from configs import config as cf def is_available(filename):", "# split label into class and subclass, keep only class data[['class', 'subclass']] =", "'Questions' dataset [verbose] : bool, verbosity level ''' # read as a pandas", "for cat in ['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\"", "if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent'", "data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): '''", "yield lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] : bool,", "'training.') with open() as logs_json: logs = json.load(logs_json) print('Best hyperparameters loaded...') return logs", "def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] : bool, verbosity level ''' #", "= np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if verbose:", "# read as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path,", "''' return os.path.isfile(filename) def chunks(lst, n): ''' Yield successive n-sized chunks 
from list", "[dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return", "pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] #", "class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True)", "before full ' 'training.') with open() as logs_json: logs = json.load(logs_json) print('Best hyperparameters", "''' Load 'Questions' dataset [verbose] : bool, verbosity level ''' # read as", "= read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load corresponding embeddings", "raise ValueError('Hyperparameters are not available. ' 'Please run train.py in \"hyper_opt\" mode before", "be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings =", "int(o) raise TypeError def save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary to \"logs\"", "'es', 'fr']: for ds in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds)))", "== 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can be 'bert'", "[embeddings] : str, type of embeddings to load ('bert' or 'labse') [verbose] :", "data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category':", "= data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories for i in", "' 'Please run train.py in 
\"hyper_opt\" mode before full ' 'training.') with open()", "import os from os.path import join from os.path import abspath import json import", "[filename] : str ''' return os.path.isfile(filename) def chunks(lst, n): ''' Yield successive n-sized", "[verbose] : bool, verbosity level ''' # read as a pandas dataframe data_path", "label into class and subclass, keep only class data[['class', 'subclass']] = data['class'].str.split('-', 1,", "Load 'Questions' dataset [verbose] : bool, verbosity level ''' # read as a", "verbosity level ''' # read as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv'))", "run train.py in \"hyper_opt\" mode before full ' 'training.') with open() as logs_json:", "dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return None def load_logs(dict_name): ''' Load best", "cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.')", "os.path.isfile(filename) def chunks(lst, n): ''' Yield successive n-sized chunks from list [lst] :", "load_logs(dict_name): ''' Load best hyperparameters dictionary from \"logs\" directory [dict_name] : str '''", "as cf def is_available(filename): ''' [filename] : str ''' return os.path.isfile(filename) def chunks(lst,", "data.append(df) data = pd.concat(data) # merge certain categories (see configs.py) and rename columns", "''' __author__ = '<NAME>' import os from os.path import join from os.path import", "categories for cat in ['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if verbose:", "os from os.path import join from os.path import abspath import json import pandas", "'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR,", "names=['text', 'class']) data.append(df) data = pd.concat(data) # merge 
certain categories (see configs.py) and", "available. ' 'Please run train.py in \"hyper_opt\" mode before full ' 'training.') with", "[verbose] : bool, verbosity level ''' # read as a pandas dataframe data", "emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0]", "in ['en', 'es', 'fr']: for ds in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR,", "'{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are not available. ' 'Please run", "data = [] for lang in ['en', 'es', 'fr']: for ds in ['train',", "= '<NAME>' import os from os.path import join from os.path import abspath import", "embeddings if embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert':", "int ''' for i in range(0, len(lst), n): yield lst[i:i + n] def", "ds in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path,", "import numpy as np from configs import config as cf def is_available(filename): '''", "data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data", "'w'), default=convert) print('Best hyper-parameters saved...') return None def load_logs(dict_name): ''' Load best hyperparameters", "__author__ = '<NAME>' import os from os.path import join from os.path import abspath", "chunks from list [lst] : python list [n] : int ''' for i", "def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions' datasets [embeddings] : str, type", "i in range(0, len(lst), n): yield lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load", "np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = 
np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full", "\"hyper_opt\" mode before full ' 'training.') with open() as logs_json: logs = json.load(logs_json)", "''' utility functions ''' __author__ = '<NAME>' import os from os.path import join", "print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings =", "data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] # remove unannotated rows #", "['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t',", "def is_available(filename): ''' [filename] : str ''' return os.path.isfile(filename) def chunks(lst, n): '''", "bool, verbosity level ''' # read as a pandas dataframe data = []", "shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose] : bool, verbosity", "hyperparameters dictionary from \"logs\" directory [dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name)", "in range(0, len(lst), n): yield lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load 'Intent'", ": str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return None", "'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load 'Questions'", "path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df)", "columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories for cat in", "return None 
def load_logs(dict_name): ''' Load best hyperparameters dictionary from \"logs\" directory [dict_name]", "usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] # remove unannotated", "open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return None def load_logs(dict_name): ''' Load", ": dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters", "is_available(filename): ''' [filename] : str ''' return os.path.isfile(filename) def chunks(lst, n): ''' Yield", "if isinstance(o, np.int64): return int(o) raise TypeError def save_logs(logs_dict, dict_name): ''' Save best", "shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions' datasets [embeddings]", "print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose] :", "'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] # remove unannotated rows # split label", "def chunks(lst, n): ''' Yield successive n-sized chunks from list [lst] : python", "datasets intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load", "# _____________ Logging related functions _____________ def convert(o): if isinstance(o, np.int64): return int(o)", "= pd.concat(data) # merge certain categories (see configs.py) and rename columns data['class'] =", "np from configs import config as cf def is_available(filename): ''' [filename] : str", "np.int64): return int(o) raise TypeError def save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary", "from \"logs\" directory [dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, 
dict_name) if not", "merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________", "'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] # remove unannotated rows", "load corresponding embeddings if embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings", "abspath import json import pandas as pd import numpy as np from configs", "remove unannotated rows # split label into class and subclass, keep only class", "load datasets intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) #", "_____________ Logging related functions _____________ def convert(o): if isinstance(o, np.int64): return int(o) raise", "'Intent' dataset [verbose] : bool, verbosity level ''' # read as a pandas", "read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] : bool, verbosity level ''' # read", "bool, verbosity level ''' # load datasets intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False)", "== merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging", "\"logs\" directory [logs_dict] : dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'),", "expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories for", ": str, type of embeddings to load ('bert' or 'labse') [verbose] : bool,", "'Intent' and 'Questions' datasets [embeddings] : str, type of embeddings to load ('bert'", "''' # read as a pandas dataframe data = [] for lang in", "[] for lang in ['en', 'es', 'fr']: for ds in ['train', 'test', 'eval']:", "# merge certain 
categories (see configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map) #", "print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging related functions _____________ def", "['en', 'es', 'fr']: for ds in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang,", "dictionary from \"logs\" directory [dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if", "verbose: print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging related functions _____________", "data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose] : bool, verbosity level '''", "# remove unannotated rows # split label into class and subclass, keep only", "python list [n] : int ''' for i in range(0, len(lst), n): yield", "to load ('bert' or 'labse') [verbose] : bool, verbosity level ''' # load", "Merge 'Intent' and 'Questions' datasets [embeddings] : str, type of embeddings to load", "= data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True):", "read as a pandas dataframe data = [] for lang in ['en', 'es',", "verbose=True): ''' Merge 'Intent' and 'Questions' datasets [embeddings] : str, type of embeddings", "rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories for cat", "verbosity level ''' # load datasets intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged", "'Please run train.py in \"hyper_opt\" mode before full ' 'training.') with open() as", "= np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape)) return", "# load datasets intent = read_intent_dataset(verbose=False) questions = 
read_questions_dataset(verbose=False) merged = pd.concat([intent, questions])", "save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary to \"logs\" directory [logs_dict] : dict", "n-sized chunks from list [lst] : python list [n] : int ''' for", "categories (see configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too", "'<NAME>' import os from os.path import join from os.path import abspath import json", "functions _____________ def convert(o): if isinstance(o, np.int64): return int(o) raise TypeError def save_logs(logs_dict,", "lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data)", "can be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings", "json import pandas as pd import numpy as np from configs import config", "in ['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape))", "[dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters", "configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories", "merge certain categories (see configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove", "elif embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can", "or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1])))", "'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) 
else: raise ValueError(\"embeddings argument can be 'bert' or", "unannotated rows # split label into class and subclass, keep only class data[['class',", "data[~data['class'].isna()] # remove unannotated rows # split label into class and subclass, keep", "dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are not available. ' 'Please run train.py", "lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] : bool, verbosity", "a pandas dataframe data = [] for lang in ['en', 'es', 'fr']: for", "best hyperparameters dictionary from \"logs\" directory [dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR,", "from os.path import join from os.path import abspath import json import pandas as", "join from os.path import abspath import json import pandas as pd import numpy", "not is_available(log_path): raise ValueError('Hyperparameters are not available. ' 'Please run train.py in \"hyper_opt\"", "= abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'},", "split label into class and subclass, keep only class data[['class', 'subclass']] = data['class'].str.split('-',", ": int ''' for i in range(0, len(lst), n): yield lst[i:i + n]", "merged = pd.concat([intent, questions]) # load corresponding embeddings if embeddings == 'labse': emb_to_load", "np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] ==", "i in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True):", "shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging related functions _____________ def 
convert(o): if", "directory [dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise", "read as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',',", "a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category'])", "isinstance(o, np.int64): return int(o) raise TypeError def save_logs(logs_dict, dict_name): ''' Save best hyperparameters", "pandas as pd import numpy as np from configs import config as cf", "(too easy) categories for cat in ['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)]", "cf.questions_embeddings) elif embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument", "hyper-parameters saved...') return None def load_logs(dict_name): ''' Load best hyperparameters dictionary from \"logs\"", "level ''' # read as a pandas dataframe data = [] for lang", "len(lst), n): yield lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose]", "embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': emb_to_load =", "embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can be", "data = pd.concat(data) # merge certain categories (see configs.py) and rename columns data['class']", ": bool, verbosity level ''' # read as a pandas dataframe data_path =", "range(0, len(lst), n): yield lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset", "(see configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too easy)", "= pd.read_csv(data_path, delimiter=',', usecols=['Question', 
'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()]", "datasets [embeddings] : str, type of embeddings to load ('bert' or 'labse') [verbose]", "cf.questions_relevant_categories for i in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def", "default=convert) print('Best hyper-parameters saved...') return None def load_logs(dict_name): ''' Load best hyperparameters dictionary", "os.path import abspath import json import pandas as pd import numpy as np", "numpy as np from configs import config as cf def is_available(filename): ''' [filename]", "'class'}, inplace=True) data = data[~data['class'].isna()] # remove unannotated rows # split label into", "# read as a pandas dataframe data = [] for lang in ['en',", "from list [lst] : python list [n] : int ''' for i in", "chunks(lst, n): ''' Yield successive n-sized chunks from list [lst] : python list", "is_available(log_path): raise ValueError('Hyperparameters are not available. 
' 'Please run train.py in \"hyper_opt\" mode", "[verbose] : bool, verbosity level ''' # load datasets intent = read_intent_dataset(verbose=False) questions", "inplace=True) data = data[[i in cf.questions_relevant_categories for i in data['class']]] if verbose: print('\\t\"Questions\"", "level ''' # read as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data", "import pandas as pd import numpy as np from configs import config as", "+ n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] : bool, verbosity level", "header=None, sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data) # merge certain categories (see", "data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories for cat in ['hi',", "assert merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings #", "intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert", "train.py in \"hyper_opt\" mode before full ' 'training.') with open() as logs_json: logs", "''' Yield successive n-sized chunks from list [lst] : python list [n] :", "corresponding embeddings if embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings ==", "import abspath import json import pandas as pd import numpy as np from", "Logging related functions _____________ def convert(o): if isinstance(o, np.int64): return int(o) raise TypeError", "saved...') return None def load_logs(dict_name): ''' Load best hyperparameters dictionary from \"logs\" directory", "'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class'])", "data['class'].str.strip() 
data.drop(['subclass'], axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories for i in data['class']]]", "= pd.concat([intent, questions]) # load corresponding embeddings if embeddings == 'labse': emb_to_load =", "' 'training.') with open() as logs_json: logs = json.load(logs_json) print('Best hyperparameters loaded...') return", "certain categories (see configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map) # remove trivial", "for i in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse',", "'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data =", "= [] for lang in ['en', 'es', 'fr']: for ds in ['train', 'test',", "if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset", "only class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1,", "data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data =", "return os.path.isfile(filename) def chunks(lst, n): ''' Yield successive n-sized chunks from list [lst]", "pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data) # merge certain categories", "ValueError(\"embeddings argument can be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR,", "or 'labse') [verbose] : bool, verbosity level ''' # load datasets intent =", "for i in range(0, len(lst), n): yield lst[i:i + n] def read_intent_dataset(verbose=True): '''", "def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset 
[verbose] : bool, verbosity level ''' #", "merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions' datasets [embeddings] : str, type of", "def load_logs(dict_name): ''' Load best hyperparameters dictionary from \"logs\" directory [dict_name] : str", "data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions' datasets", "intent = read_intent_dataset(verbose=False) questions = read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load corresponding", "'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert)", "merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging related", "= pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data) # merge certain", "return data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose] : bool, verbosity level", "str, type of embeddings to load ('bert' or 'labse') [verbose] : bool, verbosity", "data = data[~data['class'].isna()] # remove unannotated rows # split label into class and", "hyperparameters dictionary to \"logs\" directory [logs_dict] : dict [dict_name] : str ''' json.dump(logs_dict,", "rows # split label into class and subclass, keep only class data[['class', 'subclass']]", "trivial (too easy) categories for cat in ['hi', 'okay_thanks']: data = data[data['class'] !=", "data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories for cat in ['hi', 'okay_thanks']: data", "utility functions ''' __author__ = '<NAME>' import os from os.path import join from", ": bool, verbosity level ''' # read as a pandas dataframe data =", "np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == 
merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape)) return merged,", "questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if", "embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings,", "= read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load corresponding embeddings if embeddings ==", "for ds in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df =", "data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories for i", "data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose] : bool,", "= abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data", "from os.path import abspath import json import pandas as pd import numpy as", "pd.concat([intent, questions]) # load corresponding embeddings if embeddings == 'labse': emb_to_load = (cf.intent_embeddings,", "== 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert,", "questions = read_questions_dataset(verbose=False) merged = pd.concat([intent, questions]) # load corresponding embeddings if embeddings", "verbosity level ''' # read as a pandas dataframe data = [] for", "''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return None def load_logs(dict_name):", "as pd import numpy as np from 
configs import config as cf def", "list [n] : int ''' for i in range(0, len(lst), n): yield lst[i:i", "if embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': emb_to_load", "else: raise ValueError(\"embeddings argument can be 'bert' or 'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings", "n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] : bool, verbosity level '''", "return int(o) raise TypeError def save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary to", "data = data[[i in cf.questions_relevant_categories for i in data['class']]] if verbose: print('\\t\"Questions\" data", "sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data) # merge certain categories (see configs.py)", "TypeError def save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary to \"logs\" directory [logs_dict]", "Load 'Intent' dataset [verbose] : bool, verbosity level ''' # read as a", "raise TypeError def save_logs(logs_dict, dict_name): ''' Save best hyperparameters dictionary to \"logs\" directory", "as np from configs import config as cf def is_available(filename): ''' [filename] :", "df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data) # merge", "data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions' datasets [embeddings] : str,", "'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] # remove unannotated rows # split", "data[[i in cf.questions_relevant_categories for i in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return", "in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): '''", "= (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': 
emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise", "configs import config as cf def is_available(filename): ''' [filename] : str ''' return", "loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings])", "if verbose: print('Full data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging related functions", "cf def is_available(filename): ''' [filename] : str ''' return os.path.isfile(filename) def chunks(lst, n):", "''' Load 'Intent' dataset [verbose] : bool, verbosity level ''' # read as", "n): yield lst[i:i + n] def read_intent_dataset(verbose=True): ''' Load 'Intent' dataset [verbose] :", "merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0] == merged_embeddings.shape[0] if verbose: print('Full data shape={}'.format(merged.shape))", "load ('bert' or 'labse') [verbose] : bool, verbosity level ''' # load datasets", "''' Load best hyperparameters dictionary from \"logs\" directory [dict_name] : str ''' log_path", "= '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are not available. 
' 'Please", "= data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'], axis=1, inplace=True) data = data[[i", "_____________ def convert(o): if isinstance(o, np.int64): return int(o) raise TypeError def save_logs(logs_dict, dict_name):", "to \"logs\" directory [logs_dict] : dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name),", "''' for i in range(0, len(lst), n): yield lst[i:i + n] def read_intent_dataset(verbose=True):", "!= 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def read_questions_dataset(verbose=True): ''' Load", "pandas dataframe data = [] for lang in ['en', 'es', 'fr']: for ds", "= np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings = np.vstack([intent_embeddings, questions_embeddings]) assert merged.shape[0]", "functions ''' __author__ = '<NAME>' import os from os.path import join from os.path", "''' Merge 'Intent' and 'Questions' datasets [embeddings] : str, type of embeddings to", "data shape={}'.format(merged.shape)) return merged, merged_embeddings # _____________ Logging related functions _____________ def convert(o):", "dictionary to \"logs\" directory [logs_dict] : dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR,", "'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data", "verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and", "abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data =", "for lang in ['en', 'es', 'fr']: for ds in 
['train', 'test', 'eval']: path", "return data def merge_datasets(embeddings='labse', verbose=True): ''' Merge 'Intent' and 'Questions' datasets [embeddings] :", "not available. ' 'Please run train.py in \"hyper_opt\" mode before full ' 'training.')", "convert(o): if isinstance(o, np.int64): return int(o) raise TypeError def save_logs(logs_dict, dict_name): ''' Save", "# remove trivial (too easy) categories for cat in ['hi', 'okay_thanks']: data =", "config as cf def is_available(filename): ''' [filename] : str ''' return os.path.isfile(filename) def", "pd.concat(data) # merge certain categories (see configs.py) and rename columns data['class'] = data['class'].replace(cf.intent_label_map)", "dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text',", "= data['class'].replace(cf.intent_label_map) # remove trivial (too easy) categories for cat in ['hi', 'okay_thanks']:", "data.drop(['subclass'], axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories for i in data['class']]] if", "'class']) data.append(df) data = pd.concat(data) # merge certain categories (see configs.py) and rename", "read_questions_dataset(verbose=True): ''' Load 'Questions' dataset [verbose] : bool, verbosity level ''' # read", "= (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can be 'bert' or 'labse'\") print(f'{embeddings}", "import join from os.path import abspath import json import pandas as pd import", "def convert(o): if isinstance(o, np.int64): return int(o) raise TypeError def save_logs(logs_dict, dict_name): '''", "'fr']: for ds in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df", "dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters 
saved...')", "in cf.questions_relevant_categories for i in data['class']]] if verbose: print('\\t\"Questions\" data shape={}'.format(data.shape)) return data", "dataset [verbose] : bool, verbosity level ''' # read as a pandas dataframe", "easy) categories for cat in ['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if", "''' Save best hyperparameters dictionary to \"logs\" directory [logs_dict] : dict [dict_name] :", "axis=1, inplace=True) data = data[[i in cf.questions_relevant_categories for i in data['class']]] if verbose:", "import json import pandas as pd import numpy as np from configs import", "[lst] : python list [n] : int ''' for i in range(0, len(lst),", "and 'Questions' datasets [embeddings] : str, type of embeddings to load ('bert' or", "n): ''' Yield successive n-sized chunks from list [lst] : python list [n]", "abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True)", "emb_to_load = (cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else:", "('bert' or 'labse') [verbose] : bool, verbosity level ''' # load datasets intent", "of embeddings to load ('bert' or 'labse') [verbose] : bool, verbosity level '''", "[logs_dict] : dict [dict_name] : str ''' json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best", ": python list [n] : int ''' for i in range(0, len(lst), n):", "successive n-sized chunks from list [lst] : python list [n] : int '''", "type of embeddings to load ('bert' or 'labse') [verbose] : bool, verbosity level", "into class and subclass, keep only class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True)", "['hi', 'okay_thanks']: data = data[data['class'] != 'intent:{}'.format(cat)] if verbose: 
print('\\t\"Intent\" data shape={}'.format(data.shape)) return", "data = data[data['class'] != 'intent:{}'.format(cat)] if verbose: print('\\t\"Intent\" data shape={}'.format(data.shape)) return data def", "class and subclass, keep only class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class']", "delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question': 'text', 'Category': 'class'}, inplace=True) data = data[~data['class'].isna()] # remove", "print('Best hyper-parameters saved...') return None def load_logs(dict_name): ''' Load best hyperparameters dictionary from", "as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question',", ": bool, verbosity level ''' # load datasets intent = read_intent_dataset(verbose=False) questions =", "in \"hyper_opt\" mode before full ' 'training.') with open() as logs_json: logs =", "\"logs\" directory [dict_name] : str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path):", "'{}.tsv'.format(ds))) df = pd.read_csv(path, header=None, sep='\\t', names=['text', 'class']) data.append(df) data = pd.concat(data) #", "[n] : int ''' for i in range(0, len(lst), n): yield lst[i:i +", "subclass, keep only class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip()", "str ''' return os.path.isfile(filename) def chunks(lst, n): ''' Yield successive n-sized chunks from", "ValueError('Hyperparameters are not available. 
' 'Please run train.py in \"hyper_opt\" mode before full", "from configs import config as cf def is_available(filename): ''' [filename] : str '''", "related functions _____________ def convert(o): if isinstance(o, np.int64): return int(o) raise TypeError def", "(cf.intent_embeddings, cf.questions_embeddings) elif embeddings == 'bert': emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings", "keep only class data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True) data['class'] = data['class'].str.strip() data.drop(['subclass'],", "in ['train', 'test', 'eval']: path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds))) df = pd.read_csv(path, header=None,", "'labse'\") print(f'{embeddings} embeddings loaded.') intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0]))) questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR, emb_to_load[1]))) merged_embeddings", "emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert) else: raise ValueError(\"embeddings argument can be 'bert' or 'labse'\")", ": str ''' return os.path.isfile(filename) def chunks(lst, n): ''' Yield successive n-sized chunks", "'labse') [verbose] : bool, verbosity level ''' # load datasets intent = read_intent_dataset(verbose=False)", "''' # read as a pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data =", "Load best hyperparameters dictionary from \"logs\" directory [dict_name] : str ''' log_path =", ": str ''' log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name) if not is_available(log_path): raise ValueError('Hyperparameters are", "pandas dataframe data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv')) data = pd.read_csv(data_path, delimiter=',', usecols=['Question', 'Category']) data.rename(columns={'Question':", "import config as cf def is_available(filename): ''' [filename] : str ''' return os.path.isfile(filename)", "list [lst] 
: python list [n] : int ''' for i in range(0,", "questions]) # load corresponding embeddings if embeddings == 'labse': emb_to_load = (cf.intent_embeddings, cf.questions_embeddings)", "remove trivial (too easy) categories for cat in ['hi', 'okay_thanks']: data = data[data['class']", "json.dump(logs_dict, open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w'), default=convert) print('Best hyper-parameters saved...') return None def load_logs(dict_name): '''", "best hyperparameters dictionary to \"logs\" directory [logs_dict] : dict [dict_name] : str '''" ]
[ "n = input().split() lista = [int(i) for i in n] lista.sort() # sort", "lista.sort() # sort ordena a lista em ordem crescente print(*lista, sep='\\n') # *", "print(*lista, sep='\\n') # * serve para imprimir toda a lista print() print(*n, sep='\\n')", "lista = [int(i) for i in n] lista.sort() # sort ordena a lista", "ordena a lista em ordem crescente print(*lista, sep='\\n') # * serve para imprimir", "= input().split() lista = [int(i) for i in n] lista.sort() # sort ordena", "= [int(i) for i in n] lista.sort() # sort ordena a lista em", "for i in n] lista.sort() # sort ordena a lista em ordem crescente", "lista em ordem crescente print(*lista, sep='\\n') # * serve para imprimir toda a", "n] lista.sort() # sort ordena a lista em ordem crescente print(*lista, sep='\\n') #", "a lista em ordem crescente print(*lista, sep='\\n') # * serve para imprimir toda", "in n] lista.sort() # sort ordena a lista em ordem crescente print(*lista, sep='\\n')", "em ordem crescente print(*lista, sep='\\n') # * serve para imprimir toda a lista", "# sort ordena a lista em ordem crescente print(*lista, sep='\\n') # * serve", "<reponame>NOBarbosa/Exercicios_Python n = input().split() lista = [int(i) for i in n] lista.sort() #", "sort ordena a lista em ordem crescente print(*lista, sep='\\n') # * serve para", "crescente print(*lista, sep='\\n') # * serve para imprimir toda a lista print() print(*n,", "[int(i) for i in n] lista.sort() # sort ordena a lista em ordem", "input().split() lista = [int(i) for i in n] lista.sort() # sort ordena a", "i in n] lista.sort() # sort ordena a lista em ordem crescente print(*lista,", "ordem crescente print(*lista, sep='\\n') # * serve para imprimir toda a lista print()" ]
[ "timezone, timedelta import os import json import pickle import aioredis import asyncio import", "@dataclass class OrderInfo: order_id: str customer_id: str order_date: datetime email: str ip_addr: str", "return \"customer or email not found (this example only works for known customer_id", "payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features used for prediction contatining `is_fraud` field", "= len(x) def _calc_amount_stats(data: dict, by: str): col = 'order_amount' x = data.get(f'{col}_by_{by}')", "'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async", "= app_logger() model = None db = None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id',", "fb: model = pickle.load(fb) if db is None: address = context.env['db']['url'] logger.info(context, f\"Connecting", "hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import event_api", "x = xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item() return data # Cell", "Cell async def _lookup_db(key: str): item = await db.get(key) if item is None:", "**_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict, k: str, new_item: str): x", "return None return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def", "await db.get(key) if item is None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict)", "return data # Cell async def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) ->", "hopeit.app.logger 
import app_logger # Cell @dataobject @dataclass class OrderInfo: order_id: str customer_id: str", "'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext): global model,", "\"Order Information\"), responses={ 200: (dict, \"features used for prediction contatining `is_fraud` field as", "not found (this example only works for known customer_id and email)\" return payload", "EventContext, response: PostprocessHook) -> dict: if payload is None: response.status = 404 return", "or email not found (this example only works for known customer_id and email)\")", "return data def _calc_counts(data: dict, by: str): for col in ['ip_addr', 'customer_id', 'email']:", "['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell", "import asyncio import pandas as pd import numpy as np import xgboost as", "hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression import Compression from hopeit.app.context import EventContext,", "_lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None or email_features is None: return None", "to edit: 08_predict.ipynb (unless otherwise specified). 
__all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model',", "def _calc_counts(data: dict, by: str): for col in ['ip_addr', 'customer_id', 'email']: x =", "from hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression import Compression from hopeit.app.context import", "len(x) def _calc_amount_stats(data: dict, by: str): col = 'order_amount' x = data.get(f'{col}_by_{by}') if", "import numpy as np import xgboost as xgb from dataclasses import dataclass from", "dict, context: EventContext) -> dict: df = pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y", "contatining `is_fraud` field as result of prediction\"), 404: (str, \"customer or email not", ") logger = app_logger() model = None db = None features = ['order_amount',", "dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data,", "data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by: str): col = 'order_amount' x =", "datetime, timezone, timedelta import os import json import pickle import aioredis import asyncio", "pickle import aioredis import asyncio import pandas as pd import numpy as np", "# Cell from typing import Dict, Optional from datetime import datetime, timezone, timedelta", "'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext): global model, db if model", "'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell", "import pickle import aioredis import asyncio import pandas as pd import numpy as", "async def lookup_features(order: 
OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up features in", "Optional[dict], context: EventContext, response: PostprocessHook) -> dict: if payload is None: response.status =", "None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by: str): col = 'order_amount' x", "= model.predict(x) data['is_fraud'] = y[0].item() return data # Cell async def __postprocess__(payload: Optional[dict],", "example only works for known customer_id and email)\") } ) logger = app_logger()", "by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}',", "return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def lookup_features(order: OrderInfo, context: EventContext)", "hopeit.app.api import event_api from hopeit.app.logger import app_logger # Cell @dataobject @dataclass class OrderInfo:", "dict, by: str): col = 'order_amount' x = data.get(f'{col}_by_{by}') if x is not", "import Compression from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from", "OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up features in database...\") assert db,", "data.get(k) if isinstance(x, str): x = json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order:", "found (this example only works for known customer_id and email)\") } ) logger", "missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None", "NOT EDIT! File to edit: 08_predict.ipynb (unless otherwise specified). 
__all__ = ['OrderInfo', '__steps__',", "response: PostprocessHook) -> dict: if payload is None: response.status = 404 return \"customer", "# Cell async def __init_event__(context: EventContext): global model, db if model is None:", "address = context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\") db = await aioredis.create_redis_pool(address) #", "_calc_amount_stats(data, 'email') return data def _calc_counts(data: dict, by: str): for col in ['ip_addr',", "logger = app_logger() model = None db = None features = ['order_amount', 'num_email_by_customer_id',", "= None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email',", "in ['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}']", "'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context:", "['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200:", "hopeit.dataobjects import dataobject from hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression import Compression", "\"customer or email not found (this example only works for known customer_id and", "np.sum(x) # Cell async def predict(data: dict, context: EventContext) -> dict: df =", "import aioredis import asyncio import pandas as pd import numpy as np import", "otherwise specified). 
__all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features',", "k: str, new_item: str): x = data.get(k) if isinstance(x, str): x = json.loads(x)", "data['is_fraud'] = y[0].item() return data # Cell async def __postprocess__(payload: Optional[dict], context: EventContext,", "from hopeit.app.api import event_api from hopeit.app.logger import app_logger # Cell @dataobject @dataclass class", "'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext): global model, db if", "file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for prediction from {file_name}...\") with open(file_name,", "\"Connection to database missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if", "import serialize, Serialization, deserialize from hopeit.server.compression import Compression from hopeit.app.context import EventContext, PostprocessHook", "import EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import event_api from", "email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None or email_features", "_calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data: dict, by: str):", "None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def lookup_features(order:", "if customer_id_features is None or email_features is None: return None return { **_update_features(order,", "'email') return data def _calc_counts(data: dict, by: str): for col in ['ip_addr', 'customer_id',", "(unless otherwise specified). 
__all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__',", "email not found (this example only works for known customer_id and email)\" return", "Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]:", "'__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from typing", "'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email',", "'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data: dict, by: str): for", "xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item() return data # Cell async def", "'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data: dict, by:", "class OrderInfo: order_id: str customer_id: str order_date: datetime email: str ip_addr: str order_amount:", "'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id',", "'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext): global model, db if model is", "= os.path.join(context.env['model']['path'], 
context.env['model']['name']) logger.info(context, f\"Loading model for prediction from {file_name}...\") with open(file_name, 'rb')", "data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def", "_calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data: dict,", "f\"Loading model for prediction from {file_name}...\") with open(file_name, 'rb') as fb: model =", "(dict, \"features used for prediction contatining `is_fraud` field as result of prediction\"), 404:", "{address}...\") db = await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str): item =", "__all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__']", "from hopeit.server.compression import Compression from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import Spawn,", "aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str): item = await db.get(key) if item", "import dataobject from hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression import Compression from", "def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up features in database...\")", "Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features used for prediction contatining `is_fraud`", "str ip_addr: str order_amount: float location_lat: float location_long: float # Cell __steps__ =", "context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up features in database...\") assert db, \"Connection", "None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for prediction from 
{file_name}...\") with", "def _calc_amount_stats(data: dict, by: str): col = 'order_amount' x = data.get(f'{col}_by_{by}') if x", "__postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) -> dict: if payload is None: response.status", "x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}']", "_append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data", "'order_amount' x = data.get(f'{col}_by_{by}') if x is not None: x = np.array(x) data[f'{col}_max_by_{by}']", "None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x)", "= json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by: str):", "is None: return None return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict()", "_append(data: dict, k: str, new_item: str): x = data.get(k) if isinstance(x, str): x", "global model, db if model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading", "str order_date: datetime email: str ip_addr: str order_amount: float location_lat: float location_long: float", "return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict,", "from dataclasses import dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization import serialize, Serialization,", "= 'order_amount' x = data.get(f'{col}_by_{by}') if x is not None: x = np.array(x)", "ip_addr: str order_amount: float location_lat: 
float location_long: float # Cell __steps__ = ['lookup_features',", "email: str ip_addr: str order_amount: float location_lat: float location_long: float # Cell __steps__", "as result of prediction\"), 404: (str, \"customer or email not found (this example", "is not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by: str): col =", "= None db = None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id',", "prediction\"), 404: (str, \"customer or email not found (this example only works for", "f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id')", "model = pickle.load(fb) if db is None: address = context.env['db']['url'] logger.info(context, f\"Connecting to", "Cell @dataobject @dataclass class OrderInfo: order_id: str customer_id: str order_date: datetime email: str", "'__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from typing import Dict, Optional from datetime", "asyncio import pandas as pd import numpy as np import xgboost as xgb", "col in ['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x is not None:", "def _lookup_db(key: str): item = await db.get(key) if item is None: return None", "'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}'] = len(x)", "model, db if model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model", "by: str): for col in ['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x", "Dict, Optional from datetime import datetime, timezone, timedelta import os import json import", "import Spawn, SHUFFLE from hopeit.app.api import 
event_api from hopeit.app.logger import app_logger # Cell", "x = json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by:", "location_long: float # Cell __steps__ = ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict", "not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by: str): col = 'order_amount'", "customer_id_features is None or email_features is None: return None return { **_update_features(order, email_features,", "(str, \"customer or email not found (this example only works for known customer_id", "await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None or email_features is None:", "works for known customer_id and email)\") } ) logger = app_logger() model =", "used for prediction contatining `is_fraud` field as result of prediction\"), 404: (str, \"customer", "None: return None return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() }", "} def _append(data: dict, k: str, new_item: str): x = data.get(k) if isinstance(x,", "if x is not None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] =", "f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email')", "= list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data,", "prediction contatining `is_fraud` field as result of prediction\"), 404: (str, \"customer or email", "customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None or", "'email'), **_update_features(order, 
customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict, k: str, new_item: str):", "customer_id and email)\") } ) logger = app_logger() model = None db =", "-> Optional[dict]: logger.info(context, \"Looking up features in database...\") assert db, \"Connection to database", "'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data: dict, by: str): for col in", "404: (str, \"customer or email not found (this example only works for known", "of prediction\"), 404: (str, \"customer or email not found (this example only works", "= ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={", "db = None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email',", "app_logger # Cell @dataobject @dataclass class OrderInfo: order_id: str customer_id: str order_date: datetime", "# Cell async def predict(data: dict, context: EventContext) -> dict: df = pd.DataFrame([data],", "Optional from datetime import datetime, timezone, timedelta import os import json import pickle", "= await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None or email_features is", "'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def", "= np.sum(x) # Cell async def predict(data: dict, context: EventContext) -> dict: df", "None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def lookup_features(order: OrderInfo, context:", "_append(data, f'ip_addr_by_{by}', 
order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email')", "await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str): item = await db.get(key) if", "email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict, k: str, new_item:", "not None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] =", "columns=features) x = xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item() return data #", "pickle.load(fb) if db is None: address = context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\")", "model.predict(x) data['is_fraud'] = y[0].item() return data # Cell async def __postprocess__(payload: Optional[dict], context:", "def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) -> dict: if payload is None:", "is None: response.status = 404 return \"customer or email not found (this example", "to database {address}...\") db = await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str):", "'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email',", "f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data,", "async def __postprocess__(payload: Optional[dict], context: EventContext, 
response: PostprocessHook) -> dict: if payload is", "(this example only works for known customer_id and email)\") } ) logger =", "Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features used for prediction contatining", "serialize, Serialization, deserialize from hopeit.server.compression import Compression from hopeit.app.context import EventContext, PostprocessHook from", "email)\") } ) logger = app_logger() model = None db = None features", "from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import", "-> dict: df = pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y = model.predict(x) data['is_fraud']", "by: str): col = 'order_amount' x = data.get(f'{col}_by_{by}') if x is not None:", "async def predict(data: dict, context: EventContext) -> dict: df = pd.DataFrame([data], columns=features) x", "for known customer_id and email)\") } ) logger = app_logger() model = None", "dataclasses import dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization import serialize, Serialization, deserialize", "or email not found (this example only works for known customer_id and email)\"", "model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for prediction from", "assert db, \"Connection to database missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email)", "_update_features(order: OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data,", "'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from typing import Dict, Optional", "xgb from dataclasses import dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization 
import serialize,", "['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id',", "'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict,", "_lookup_db(key: str): item = await db.get(key) if item is None: return None return", "event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features used for", "is None: address = context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\") db = await", "is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for prediction from {file_name}...\")", "# Cell __steps__ = ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo,", "order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data,", "json import pickle import aioredis import asyncio import pandas as pd import numpy", "pd import numpy as np import xgboost as xgb from dataclasses import dataclass", "{ **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict, k:", "str, new_item: str): x = data.get(k) if isinstance(x, str): x = json.loads(x) x.append(new_item)", "float # Cell __steps__ = ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\",", "None: address = 
context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\") db = await aioredis.create_redis_pool(address)", "as fb: model = pickle.load(fb) if db is None: address = context.env['db']['url'] logger.info(context,", "list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}',", "= pickle.load(fb) if db is None: address = context.env['db']['url'] logger.info(context, f\"Connecting to database", "# Cell @dataobject @dataclass class OrderInfo: order_id: str customer_id: str order_date: datetime email:", "'predict', '__postprocess__'] # Cell from typing import Dict, Optional from datetime import datetime,", "str): item = await db.get(key) if item is None: return None return deserialize(item,", "= event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features used", "= data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict,", "email_features is None: return None return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'),", "Cell async def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up features", "= y[0].item() return data # Cell async def __postprocess__(payload: Optional[dict], context: EventContext, response:", "Cell __steps__ = ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order", "app_logger() model = None db = None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id',", "'lookup_features', 'predict', '__postprocess__'] # Cell from typing import Dict, Optional from datetime import", "= context.env['db']['url'] logger.info(context, f\"Connecting to database 
{address}...\") db = await aioredis.create_redis_pool(address) # Cell", "hopeit.server.compression import Compression from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE", "edit: 08_predict.ipynb (unless otherwise specified). __all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db',", "= np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] =", "SHUFFLE from hopeit.app.api import event_api from hopeit.app.logger import app_logger # Cell @dataobject @dataclass", "**order.to_dict() } def _append(data: dict, k: str, new_item: str): x = data.get(k) if", "'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id',", "item = await db.get(key) if item is None: return None return deserialize(item, Serialization.PICKLE4,", "data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def predict(data: dict, context:", "item is None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async", "only works for known customer_id and email)\") } ) logger = app_logger() model", "datetime import datetime, timezone, timedelta import os import json import pickle import aioredis", "= await db.get(key) if item is None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4,", "logger.info(context, f\"Loading model for prediction from {file_name}...\") with open(file_name, 'rb') as fb: model", "08_predict.ipynb (unless otherwise specified). 
__all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features',", "features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id',", "specified). __all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict',", "db is None: address = context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\") db =", "as np import xgboost as xgb from dataclasses import dataclass from hopeit.dataobjects import", "logger.info(context, f\"Connecting to database {address}...\") db = await aioredis.create_redis_pool(address) # Cell async def", "= np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def predict(data:", "pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item() return data", "from hopeit.dataobjects import dataobject from hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression import", "import json import pickle import aioredis import asyncio import pandas as pd import", "_lookup_db(order.email) ) if customer_id_features is None or email_features is None: return None return", "field as result of prediction\"), 404: (str, \"customer or email not found (this", "col = 'order_amount' x = data.get(f'{col}_by_{by}') if x is not None: x =", "import datetime, timezone, timedelta import os import json import pickle import aioredis import", "_append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) 
_calc_counts(data,", "result of prediction\"), 404: (str, \"customer or email not found (this example only", "database missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is", "if item is None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell", "Compression from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api", "None or email_features is None: return None return { **_update_features(order, email_features, 'email'), **_update_features(order,", "= np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] =", "_calc_amount_stats(data: dict, by: str): col = 'order_amount' x = data.get(f'{col}_by_{by}') if x is", "dict, k: str, new_item: str): x = data.get(k) if isinstance(x, str): x =", "f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def", "data def _calc_counts(data: dict, by: str): for col in ['ip_addr', 'customer_id', 'email']: x", "<gh_stars>1-10 # AUTOGENERATED! DO NOT EDIT! 
File to edit: 08_predict.ipynb (unless otherwise specified).", "**_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict, k: str,", "x is not None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x)", "open(file_name, 'rb') as fb: model = pickle.load(fb) if db is None: address =", "is None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def", "Spawn, SHUFFLE from hopeit.app.api import event_api from hopeit.app.logger import app_logger # Cell @dataobject", "str order_amount: float location_lat: float location_long: float # Cell __steps__ = ['lookup_features', 'predict']", "None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email',", "PostprocessHook) -> dict: if payload is None: response.status = 404 return \"customer or", "_calc_counts(data: dict, by: str): for col in ['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}')", "'__postprocess__'] # Cell from typing import Dict, Optional from datetime import datetime, timezone,", "data.get(f'{col}_by_{by}') if x is not None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}']", "'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext): global model, db", "str customer_id: str order_date: datetime email: str ip_addr: str order_amount: float location_lat: float", "from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import event_api from hopeit.app.logger import app_logger", "Information\"), responses={ 200: (dict, \"features 
used for prediction contatining `is_fraud` field as result", "new_item: str): x = data.get(k) if isinstance(x, str): x = json.loads(x) x.append(new_item) data[k]", "to database missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features", "dict: if payload is None: response.status = 404 return \"customer or email not", "np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async", "from typing import Dict, Optional from datetime import datetime, timezone, timedelta import os", "= ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] #", "EventContext) -> Optional[dict]: logger.info(context, \"Looking up features in database...\") assert db, \"Connection to", "np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x)", "'rb') as fb: model = pickle.load(fb) if db is None: address = context.env['db']['url']", "data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount)", "= pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item() return", "float location_long: float # Cell __steps__ = ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live:", "None return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features, 'customer_id'), **order.to_dict() } def _append(data:", "AUTOGENERATED! DO NOT EDIT! File to edit: 08_predict.ipynb (unless otherwise specified). 
__all__ =", "str): x = json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict,", "= 404 return \"customer or email not found (this example only works for", "timedelta import os import json import pickle import aioredis import asyncio import pandas", "200: (dict, \"features used for prediction contatining `is_fraud` field as result of prediction\"),", "if db is None: address = context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\") db", "np import xgboost as xgb from dataclasses import dataclass from hopeit.dataobjects import dataobject", "order_id: str customer_id: str order_date: datetime email: str ip_addr: str order_amount: float location_lat:", "pandas as pd import numpy as np import xgboost as xgb from dataclasses", "Compression.LZ4, dict) # Cell async def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context,", "db if model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for", "import os import json import pickle import aioredis import asyncio import pandas as", "if payload is None: response.status = 404 return \"customer or email not found", "features in database...\") assert db, \"Connection to database missing.\" customer_id_features, email_features = await", "= xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item() return data # Cell async", "Cell from typing import Dict, Optional from datetime import datetime, timezone, timedelta import", "data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) #", "customer_id: str order_date: datetime email: str ip_addr: str order_amount: float location_lat: float location_long:", "'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 
'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] #", "dict) # Cell async def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking", "isinstance(x, str): x = json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data:", "in database...\") assert db, \"Connection to database missing.\" customer_id_features, email_features = await asyncio.gather(", "# Cell async def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up", "Cell async def __init_event__(context: EventContext): global model, db if model is None: file_name", "prediction from {file_name}...\") with open(file_name, 'rb') as fb: model = pickle.load(fb) if db", "order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return", "db = await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str): item = await", "dataobject from hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression import Compression from hopeit.app.context", "y = model.predict(x) data['is_fraud'] = y[0].item() return data # Cell async def __postprocess__(payload:", "= data.get(k) if isinstance(x, str): x = json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def", "'__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from", "with open(file_name, 'rb') as fb: model = pickle.load(fb) if db is None: address", "x = data.get(k) if isinstance(x, str): x = 
json.loads(x) x.append(new_item) data[k] = list(set(x[-10:]))", "summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features used for prediction", "numpy as np import xgboost as xgb from dataclasses import dataclass from hopeit.dataobjects", "if x is not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by: str):", "'logger', 'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from typing import", "def __init_event__(context: EventContext): global model, db if model is None: file_name = os.path.join(context.env['model']['path'],", "as xgb from dataclasses import dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization import", "event_api from hopeit.app.logger import app_logger # Cell @dataobject @dataclass class OrderInfo: order_id: str", "_append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data,", "dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization import serialize, Serialization, deserialize from hopeit.server.compression", "EventContext): global model, db if model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context,", "logger.info(context, \"Looking up features in database...\") assert db, \"Connection to database missing.\" customer_id_features,", "x = data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data:", "str): x = data.get(k) if isinstance(x, str): x = json.loads(x) x.append(new_item) data[k] =", "EDIT! File to edit: 08_predict.ipynb (unless otherwise specified). 
__all__ = ['OrderInfo', '__steps__', '__api__',", "np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x)", "PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import event_api from hopeit.app.logger import", "'model', 'db', 'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from typing import Dict,", "context: EventContext) -> dict: df = pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y =", "-> dict: if payload is None: response.status = 404 return \"customer or email", "`is_fraud` field as result of prediction\"), 404: (str, \"customer or email not found", "str): for col in ['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x is", "payload is None: response.status = 404 return \"customer or email not found (this", "lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]: logger.info(context, \"Looking up features in database...\") assert", "def _update_features(order: OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr)", "EventContext, PostprocessHook from hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import event_api from hopeit.app.logger", "db.get(key) if item is None: return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) #", "response.status = 404 return \"customer or email not found (this example only works", "order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data:", "database {address}...\") db = await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str): item", "context.env['db']['url'] logger.info(context, f\"Connecting to database {address}...\") db 
= await aioredis.create_redis_pool(address) # Cell async", "} ) logger = app_logger() model = None db = None features =", "hopeit.app.events import Spawn, SHUFFLE from hopeit.app.api import event_api from hopeit.app.logger import app_logger #", "OrderInfo: order_id: str customer_id: str order_date: datetime email: str ip_addr: str order_amount: float", "json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by: str): _append(data,", "np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def predict(data: dict, context: EventContext) ->", "x is not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by: str): col", "as pd import numpy as np import xgboost as xgb from dataclasses import", "data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def predict(data: dict, context: EventContext) -> dict:", "import event_api from hopeit.app.logger import app_logger # Cell @dataobject @dataclass class OrderInfo: order_id:", "\"Looking up features in database...\") assert db, \"Connection to database missing.\" customer_id_features, email_features", "typing import Dict, Optional from datetime import datetime, timezone, timedelta import os import", "is not None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}']", "import dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization import serialize, Serialization, deserialize from", "Cell async def predict(data: dict, context: EventContext) -> dict: df = pd.DataFrame([data], columns=features)", "'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 
'order_amount_std_by_email', 'order_amount_min_by_email',", "deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def lookup_features(order: OrderInfo, context: EventContext) ->", "float location_lat: float location_long: float # Cell __steps__ = ['lookup_features', 'predict'] __api__ =", "x = data.get(f'{col}_by_{by}') if x is not None: x = np.array(x) data[f'{col}_max_by_{by}'] =", "async def _lookup_db(key: str): item = await db.get(key) if item is None: return", "np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def predict(data: dict,", "@dataobject @dataclass class OrderInfo: order_id: str customer_id: str order_date: datetime email: str ip_addr:", "location_lat: float location_long: float # Cell __steps__ = ['lookup_features', 'predict'] __api__ = event_api(", "os import json import pickle import aioredis import asyncio import pandas as pd", "dict: df = pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] =", "context: EventContext, response: PostprocessHook) -> dict: if payload is None: response.status = 404", "if model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for prediction", "__api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"), responses={ 200: (dict, \"features", "= data.get(f'{col}_by_{by}') if x is not None: x = np.array(x) data[f'{col}_max_by_{by}'] = np.max(x)", "= np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell async def predict(data: dict, context: EventContext)", "{file_name}...\") with open(file_name, 'rb') as fb: model = pickle.load(fb) if db is None:", "Optional[dict]: logger.info(context, \"Looking up features in database...\") assert db, \"Connection to database missing.\"", "data[f'{col}_max_by_{by}'] = np.max(x) data[f'{col}_min_by_{by}'] = np.min(x) data[f'{col}_mean_by_{by}'] = 
np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}']", "'features', '__init_event__', 'lookup_features', 'predict', '__postprocess__'] # Cell from typing import Dict, Optional from", "File to edit: 08_predict.ipynb (unless otherwise specified). __all__ = ['OrderInfo', '__steps__', '__api__', 'logger',", "from hopeit.app.logger import app_logger # Cell @dataobject @dataclass class OrderInfo: order_id: str customer_id:", "not found (this example only works for known customer_id and email)\") } )", "order_amount: float location_lat: float location_long: float # Cell __steps__ = ['lookup_features', 'predict'] __api__", "import app_logger # Cell @dataobject @dataclass class OrderInfo: order_id: str customer_id: str order_date:", "x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}',", "async def __init_event__(context: EventContext): global model, db if model is None: file_name =", "or email_features is None: return None return { **_update_features(order, email_features, 'email'), **_update_features(order, customer_id_features,", "None: response.status = 404 return \"customer or email not found (this example only", "['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}'] =", "data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}'] = len(x) def _calc_amount_stats(data: dict, by:", "def _append(data: dict, k: str, new_item: str): x = data.get(k) if isinstance(x, str):", "datetime email: str ip_addr: str order_amount: float location_lat: float location_long: float # Cell", "_calc_amount_stats(data, 'customer_id') _calc_amount_stats(data, 'email') return data def _calc_counts(data: dict, by: str): for col", "if isinstance(x, str): x = json.loads(x) x.append(new_item) data[k] = list(set(x[-10:])) def _update_features(order: OrderInfo,", "df = pd.DataFrame([data], columns=features) x = 
xgb.DMatrix(df) y = model.predict(x) data['is_fraud'] = y[0].item()", "model = None db = None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id',", "order_date: datetime email: str ip_addr: str order_amount: float location_lat: float location_long: float #", "'customer_id'), **order.to_dict() } def _append(data: dict, k: str, new_item: str): x = data.get(k)", "and email)\") } ) logger = app_logger() model = None db = None", "'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext): global", "OrderInfo, data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}',", "Cell async def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) -> dict: if payload", "known customer_id and email)\") } ) logger = app_logger() model = None db", "aioredis import asyncio import pandas as pd import numpy as np import xgboost", "responses={ 200: (dict, \"features used for prediction contatining `is_fraud` field as result of", "data: dict, by: str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email)", "db, \"Connection to database missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) )", "xgboost as xgb from dataclasses import dataclass from hopeit.dataobjects import dataobject from hopeit.server.serialization", "model for prediction from {file_name}...\") with open(file_name, 'rb') as fb: model = pickle.load(fb)", "str): col = 'order_amount' x = data.get(f'{col}_by_{by}') if x is not None: x", "404 return 
\"customer or email not found (this example only works for known", "= await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key: str): item = await db.get(key)", "deserialize from hopeit.server.compression import Compression from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events import", "for col in ['ip_addr', 'customer_id', 'email']: x = data.get(f'{col}_by_{by}') if x is not", "'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email']", "EventContext) -> dict: df = pd.DataFrame([data], columns=features) x = xgb.DMatrix(df) y = model.predict(x)", "\"features used for prediction contatining `is_fraud` field as result of prediction\"), 404: (str,", "None db = None features = ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id',", "'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email',", "up features in database...\") assert db, \"Connection to database missing.\" customer_id_features, email_features =", "from datetime import datetime, timezone, timedelta import os import json import pickle import", "dict, by: str): for col in ['ip_addr', 'customer_id', 'email']: x = 
data.get(f'{col}_by_{by}') if", "return None return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict) # Cell async def lookup_features(order: OrderInfo,", "'order_amount_max_by_customer_id', 'order_amount_sum_by_customer_id', 'order_amount_mean_by_email', 'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email', 'order_amount_sum_by_email'] # Cell async def __init_event__(context: EventContext):", "def predict(data: dict, context: EventContext) -> dict: df = pd.DataFrame([data], columns=features) x =", "# Cell async def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) -> dict: if", "# Cell async def _lookup_db(key: str): item = await db.get(key) if item is", "from {file_name}...\") with open(file_name, 'rb') as fb: model = pickle.load(fb) if db is", "import pandas as pd import numpy as np import xgboost as xgb from", "for prediction from {file_name}...\") with open(file_name, 'rb') as fb: model = pickle.load(fb) if", "y[0].item() return data # Cell async def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook)", "is None or email_features is None: return None return { **_update_features(order, email_features, 'email'),", "= np.min(x) data[f'{col}_mean_by_{by}'] = np.mean(x) data[f'{col}_std_by_{by}'] = np.std(x) data[f'{col}_sum_by_{by}'] = np.sum(x) # Cell", "f\"Connecting to database {address}...\") db = await aioredis.create_redis_pool(address) # Cell async def _lookup_db(key:", "data # Cell async def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) -> dict:", ") if customer_id_features is None or email_features is None: return None return {", "os.path.join(context.env['model']['path'], context.env['model']['name']) logger.info(context, f\"Loading model for prediction from {file_name}...\") with open(file_name, 'rb') as", "Serialization, deserialize from hopeit.server.compression import Compression 
from hopeit.app.context import EventContext, PostprocessHook from hopeit.app.events", "order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id) _calc_counts(data, 'customer_id') _calc_counts(data, 'email') _calc_amount_stats(data, 'customer_id')", "__init_event__(context: EventContext): global model, db if model is None: file_name = os.path.join(context.env['model']['path'], context.env['model']['name'])", "asyncio.gather( _lookup_db(order.customer_id), _lookup_db(order.email) ) if customer_id_features is None or email_features is None: return", "predict(data: dict, context: EventContext) -> dict: df = pd.DataFrame([data], columns=features) x = xgb.DMatrix(df)", "database...\") assert db, \"Connection to database missing.\" customer_id_features, email_features = await asyncio.gather( _lookup_db(order.customer_id),", "__steps__ = ['lookup_features', 'predict'] __api__ = event_api( summary=\"Live: Predict Fraud\", payload=(OrderInfo, \"Order Information\"),", "str): _append(data, f'order_amount_by_{by}', order.order_amount) _append(data, f'ip_addr_by_{by}', order.ip_addr) _append(data, f'email_by_{by}', order.email) _append(data, f'customer_id_by_{by}', order.customer_id)", "for prediction contatining `is_fraud` field as result of prediction\"), 404: (str, \"customer or", "context.env['model']['name']) logger.info(context, f\"Loading model for prediction from {file_name}...\") with open(file_name, 'rb') as fb:", "# AUTOGENERATED! DO NOT EDIT! File to edit: 08_predict.ipynb (unless otherwise specified). __all__", "DO NOT EDIT! File to edit: 08_predict.ipynb (unless otherwise specified). 
__all__ = ['OrderInfo',", "'email']: x = data.get(f'{col}_by_{by}') if x is not None: data[f'num_{col}_by_{by}'] = len(x) def", "= ['order_amount', 'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id', 'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id', 'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email', 'order_amount_mean_by_customer_id', 'order_amount_std_by_customer_id',", "import Dict, Optional from datetime import datetime, timezone, timedelta import os import json", "email not found (this example only works for known customer_id and email)\") }", "import xgboost as xgb from dataclasses import dataclass from hopeit.dataobjects import dataobject from", "customer_id_features, 'customer_id'), **order.to_dict() } def _append(data: dict, k: str, new_item: str): x =" ]
[]
[ "'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', { 'classes': ('collapse',), 'fields': ('username',) }) )", "('name', 'submitted') readonly_fields = ('submitted',) fieldsets = ( (None, {'fields': ('name', 'phone_number', 'order_details')", "}), ('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details',", "class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted') list_filter = ('name', 'submitted') readonly_fields", "'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted')", "'submitted') }), ('Order Admin', { 'classes': ('collapse',), 'fields': ('username',) }) ) admin.site.register(Order, OrderAdmin)", "Register your models here. class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted') list_filter", "Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes':", "models here. class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted') list_filter = ('name',", "'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',), 'fields':", "# Register your models here. class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted')", "admin from .models import Order # Register your models here. 
class OrderAdmin(admin.ModelAdmin): list_display", "('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin',", "= ( (None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',),", "{ 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',),", "'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address',", "django.contrib import admin from .models import Order # Register your models here. class", "Order # Register your models here. class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number',", "('id', 'name', 'phone_number', 'submitted') list_filter = ('name', 'submitted') readonly_fields = ('submitted',) fieldsets =", "'name', 'phone_number', 'submitted') list_filter = ('name', 'submitted') readonly_fields = ('submitted',) fieldsets = (", "'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }),", "('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', { 'classes': ('collapse',), 'fields':", ".models import Order # Register your models here. class OrderAdmin(admin.ModelAdmin): list_display = ('id',", "'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', { 'classes': ('collapse',), 'fields': ('username',)", "import Order # Register your models here. 
class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name',", "('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', { 'classes': ('collapse',), 'fields': ('username',) })", "= ('name', 'submitted') readonly_fields = ('submitted',) fieldsets = ( (None, {'fields': ('name', 'phone_number',", "}), ('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order", "'phone_number', 'submitted') list_filter = ('name', 'submitted') readonly_fields = ('submitted',) fieldsets = ( (None,", "'submitted') readonly_fields = ('submitted',) fieldsets = ( (None, {'fields': ('name', 'phone_number', 'order_details') }),", "fieldsets = ( (None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes':", "'submitted') list_filter = ('name', 'submitted') readonly_fields = ('submitted',) fieldsets = ( (None, {'fields':", "('submitted',) fieldsets = ( (None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details', {", "(None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name',", "Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', {", "('name', 'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone')", "list_filter = ('name', 'submitted') readonly_fields = ('submitted',) fieldsets = ( (None, {'fields': ('name',", "= ('submitted',) fieldsets = ( (None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details',", "'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }),", "list_display = ('id', 'name', 'phone_number', 'submitted') list_filter = ('name', 
'submitted') readonly_fields = ('submitted',)", "'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient", "import admin from .models import Order # Register your models here. class OrderAdmin(admin.ModelAdmin):", "here. class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted') list_filter = ('name', 'submitted')", "from .models import Order # Register your models here. class OrderAdmin(admin.ModelAdmin): list_display =", "from django.contrib import admin from .models import Order # Register your models here.", "('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name',", "OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted') list_filter = ('name', 'submitted') readonly_fields =", "{ 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', { 'classes':", "= ('id', 'name', 'phone_number', 'submitted') list_filter = ('name', 'submitted') readonly_fields = ('submitted',) fieldsets", "('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', {", "('pickup_name', 'pickup_address', 'pickup_phone') }), ('Recipient Details', { 'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone',", "{'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',), 'fields': ('pickup_name', 'pickup_address',", "'classes': ('collapse',), 'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted') }), ('Order Admin', { 'classes': ('collapse',),", "( (None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up Details', { 'classes': ('collapse',), 'fields':", "your models here. 
class OrderAdmin(admin.ModelAdmin): list_display = ('id', 'name', 'phone_number', 'submitted') list_filter =", "readonly_fields = ('submitted',) fieldsets = ( (None, {'fields': ('name', 'phone_number', 'order_details') }), ('Pick-Up", "'recipient_phone', 'submitted') }), ('Order Admin', { 'classes': ('collapse',), 'fields': ('username',) }) ) admin.site.register(Order," ]
[ "models from rapt.util import edit_yaml, dump_yaml from pprint import pformat @click.command() def buildpack():", "@click.command() def buildpack(): tmpl = { 'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '',", "'available buildpacks': [ bp.repo_url for bp in query('buildpack', vr) ] } config =", "click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create() click.echo('Create %s %s!'", "get_vr from rapt.models import query, models from rapt.util import edit_yaml, dump_yaml from pprint", "} vr = get_vr() info = { 'available buildpacks': [ bp.repo_url for bp", "for bp in query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack", "'hg'], 'description': '', 'order': 0, } vr = get_vr() info = { 'available", "'', 'repo_type': ['git', 'hg'], 'description': '', 'order': 0, } vr = get_vr() info", "'', 'order': 0, } vr = get_vr() info = { 'available buildpacks': [", "dump_yaml from pprint import pformat @click.command() def buildpack(): tmpl = { 'repo_url': '',", "vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config))", "'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '', 'order': 0, } vr = get_vr()", "pformat @click.command() def buildpack(): tmpl = { 'repo_url': '', 'repo_type': ['git', 'hg'], 'description':", "click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create() click.echo('Create %s %s!' 
% (bp.repo_url, bp.resource_uri))", "{ 'available buildpacks': [ bp.repo_url for bp in query('buildpack', vr) ] } config", "0, } vr = get_vr() info = { 'available buildpacks': [ bp.repo_url for", "vr = get_vr() info = { 'available buildpacks': [ bp.repo_url for bp in", "['git', 'hg'], 'description': '', 'order': 0, } vr = get_vr() info = {", "'order': 0, } vr = get_vr() info = { 'available buildpacks': [ bp.repo_url", "import query, models from rapt.util import edit_yaml, dump_yaml from pprint import pformat @click.command()", "import click from rapt.connection import get_vr from rapt.models import query, models from rapt.util", "info = { 'available buildpacks': [ bp.repo_url for bp in query('buildpack', vr) ]", "click from rapt.connection import get_vr from rapt.models import query, models from rapt.util import", "def buildpack(): tmpl = { 'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '', 'order':", "= { 'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '', 'order': 0, } vr", "rapt.util import edit_yaml, dump_yaml from pprint import pformat @click.command() def buildpack(): tmpl =", "edit_yaml, dump_yaml from pprint import pformat @click.command() def buildpack(): tmpl = { 'repo_url':", "pprint import pformat @click.command() def buildpack(): tmpl = { 'repo_url': '', 'repo_type': ['git',", "click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr,", "query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n')", "edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp", "'description': '', 'order': 0, } vr = get_vr() info = { 'available buildpacks':", "import get_vr from rapt.models import query, models from rapt.util import 
edit_yaml, dump_yaml from", "{ 'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '', 'order': 0, } vr =", "buildpacks': [ bp.repo_url for bp in query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl),", "if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create() click.echo('Create %s %s!' % (bp.repo_url,", "buildpack(): tmpl = { 'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '', 'order': 0,", "following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create() click.echo('Create", "= edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'):", "import edit_yaml, dump_yaml from pprint import pformat @click.command() def buildpack(): tmpl = {", "bp.repo_url for bp in query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating", "in query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following", "rapt.connection import get_vr from rapt.models import query, models from rapt.util import edit_yaml, dump_yaml", "] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo()", "from pprint import pformat @click.command() def buildpack(): tmpl = { 'repo_url': '', 'repo_type':", "= get_vr() info = { 'available buildpacks': [ bp.repo_url for bp in query('buildpack',", "with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create()", "from rapt.models import query, models from rapt.util import edit_yaml, dump_yaml from pprint import", "get_vr() info = { 'available buildpacks': [ bp.repo_url for bp in query('buildpack', vr)", 
"dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp =", "from rapt.connection import get_vr from rapt.models import query, models from rapt.util import edit_yaml,", "query, models from rapt.util import edit_yaml, dump_yaml from pprint import pformat @click.command() def", "'repo_type': ['git', 'hg'], 'description': '', 'order': 0, } vr = get_vr() info =", "= { 'available buildpacks': [ bp.repo_url for bp in query('buildpack', vr) ] }", "rapt.models import query, models from rapt.util import edit_yaml, dump_yaml from pprint import pformat", "bp in query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with", "buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config)", "import pformat @click.command() def buildpack(): tmpl = { 'repo_url': '', 'repo_type': ['git', 'hg'],", "from rapt.util import edit_yaml, dump_yaml from pprint import pformat @click.command() def buildpack(): tmpl", "click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create() click.echo('Create %s %s!' %", "config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create buildpack?'): bp = models.Buildpack(vr, config) bp.create() click.echo('Create %s", "[ bp.repo_url for bp in query('buildpack', vr) ] } config = edit_yaml(dump_yaml(tmpl), dump_yaml(info))", "} config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if", "config = edit_yaml(dump_yaml(tmpl), dump_yaml(info)) click.echo('Creating buildpack with following config:\\n') click.echo(pformat(config)) click.echo() if click.confirm('Create", "tmpl = { 'repo_url': '', 'repo_type': ['git', 'hg'], 'description': '', 'order': 0, }" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "or # implied. # See the License for the specific language governing permissions", "OR CONDITIONS OF ANY KIND, either express or # implied. # See the", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "pass the XML files. \"\"\" def __init__(self, username, password, host, method): self.username =", "import StringIO as sio import os from requests_toolbelt import MultipartEncoder import requests #import", "License. # You may obtain a copy of the License at # #", "gzip from StringIO import StringIO as sio import os from requests_toolbelt import MultipartEncoder", "# set url being used url = self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped,", "url = self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data", "# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License,", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username, self.password), headers={'Content-type': m.content_type},", "OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the \"License\");", "compliance with the License. 
# You may obtain a copy of the License", "('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username,", "username self.password = password self.host = host self.method = method def _create_url(self): \"\"\"", "\"\"\" import gzip from StringIO import StringIO as sio import os from requests_toolbelt", "\"\"\" def __init__(self, username, password, host, method): self.username = username self.password = password", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "import MultipartEncoder import requests #import pdb class RPC: \"\"\" RPC class. Used to", "the switches \"\"\" # set url being used url = self._create_url() ziped =", "this file except in compliance with the License. # You may obtain a", "method def _create_url(self): \"\"\" Internal method that returns the switches' URLs given the", "host self.method = method def _create_url(self): \"\"\" Internal method that returns the switches'", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "the cfg attributes. \"\"\" return self.method + '://' + self.host + '/System/File/file_config.html' def", "you may not use this file except in compliance with the License. 
#", "sio import os from requests_toolbelt import MultipartEncoder import requests #import pdb class RPC:", "to the switches \"\"\" # set url being used url = self._create_url() ziped", "+ '://' + self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used to", "+ '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used to send a given xml", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close()", "ANY KIND, either express or # implied. # See the License for the", "def _create_url(self): \"\"\" Internal method that returns the switches' URLs given the cfg", "\"\"\" RPC class used to communicate with the hardware \"\"\" import gzip from", "return self.method + '://' + self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method", "the specific language governing permissions and # limitations under the License. \"\"\" RPC", "# pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m", "to send a given xml file to the switches \"\"\" # set url", "in compliance with the License. # You may obtain a copy of the", "given the cfg attributes. \"\"\" return self.method + '://' + self.host + '/System/File/file_config.html'", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "MultipartEncoder import requests #import pdb class RPC: \"\"\" RPC class. Used to connect", "= username self.password = password self.host = host self.method = method def _create_url(self):", "that returns the switches' URLs given the cfg attributes. 
\"\"\" return self.method +", "file to the switches \"\"\" # set url being used url = self._create_url()", "ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream')))", "use this file except in compliance with the License. # You may obtain", "pdb class RPC: \"\"\" RPC class. Used to connect to the client and", "\"\"\" return self.method + '://' + self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\"", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "for the specific language governing permissions and # limitations under the License. \"\"\"", "gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'),", "not use this file except in compliance with the License. # You may", "and # limitations under the License. \"\"\" RPC class used to communicate with", "sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace()", "RPC class used to communicate with the hardware \"\"\" import gzip from StringIO", "under the License. \"\"\" RPC class used to communicate with the hardware \"\"\"", "RPC class. Used to connect to the client and pass the XML files.", "self.method = method def _create_url(self): \"\"\" Internal method that returns the switches' URLs", "specific language governing permissions and # limitations under the License. \"\"\" RPC class", "send a given xml file to the switches \"\"\" # set url being", "See the License for the specific language governing permissions and # limitations under", "+ self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used to send a", "KIND, either express or # implied. 
# See the License for the specific", "('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "the client and pass the XML files. \"\"\" def __init__(self, username, password, host,", "\"\"\" RPC class. Used to connect to the client and pass the XML", "Used to connect to the client and pass the XML files. \"\"\" def", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "xml file to the switches \"\"\" # set url being used url =", "MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username, self.password), headers={'Content-type': m.content_type}, verify=False) print r.text", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue()", "run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload',", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "self.username = username self.password = password self.host = host self.method = method def", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #", "StringIO as sio import os from requests_toolbelt import MultipartEncoder import requests #import pdb", "os from requests_toolbelt import MultipartEncoder import requests #import pdb class RPC: \"\"\" RPC", "gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: 
gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields =", "self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used to send a given", "2.0 (the \"License\"); # you may not use this file except in compliance", "('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username, self.password),", "class used to communicate with the hardware \"\"\" import gzip from StringIO import", "# you may not use this file except in compliance with the License.", "set url being used url = self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w')", "ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data,", "from StringIO import StringIO as sio import os from requests_toolbelt import MultipartEncoder import", "agreed to in writing, software # distributed under the License is distributed on", "host, method): self.username = username self.password = password self.host = host self.method =", "the hardware \"\"\" import gzip from StringIO import StringIO as sio import os", "(the \"License\"); # you may not use this file except in compliance with", "client and pass the XML files. \"\"\" def __init__(self, username, password, host, method):", "permissions and # limitations under the License. 
\"\"\" RPC class used to communicate", "password self.host = host self.method = method def _create_url(self): \"\"\" Internal method that", "# # Unless required by applicable law or agreed to in writing, software", "= ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload',", "= MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username, self.password), headers={'Content-type': m.content_type}, verify=False) print", "def __init__(self, username, password, host, method): self.username = username self.password = password self.host", "= password self.host = host self.method = method def _create_url(self): \"\"\" Internal method", "returns the switches' URLs given the cfg attributes. \"\"\" return self.method + '://'", "'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r =", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "username, password, host, method): self.username = username self.password = password self.host = host", "except in compliance with the License. # You may obtain a copy of", "express or # implied. # See the License for the specific language governing", "by applicable law or agreed to in writing, software # distributed under the", "RPC: \"\"\" RPC class. Used to connect to the client and pass the", "class. Used to connect to the client and pass the XML files. 
\"\"\"", "'/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used to send a given xml file", "send_xml(self, xml_content): \"\"\" Method used to send a given xml file to the", "= (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----')", "language governing permissions and # limitations under the License. \"\"\" RPC class used", "self.method + '://' + self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used", "used url = self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content)", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "cfg attributes. \"\"\" return self.method + '://' + self.host + '/System/File/file_config.html' def send_xml(self,", "'1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m,", "communicate with the hardware \"\"\" import gzip from StringIO import StringIO as sio", "to connect to the client and pass the XML files. \"\"\" def __init__(self,", "the XML files. \"\"\" def __init__(self, username, password, host, method): self.username = username", "Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "switches' URLs given the cfg attributes. 
\"\"\" return self.method + '://' + self.host", "\"\"\" # set url being used url = self._create_url() ziped = sio() with", "run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username, self.password), headers={'Content-type':", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "URLs given the cfg attributes. \"\"\" return self.method + '://' + self.host +", "xml_content): \"\"\" Method used to send a given xml file to the switches", "Method used to send a given xml file to the switches \"\"\" #", "# limitations under the License. \"\"\" RPC class used to communicate with the", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "connect to the client and pass the XML files. \"\"\" def __init__(self, username,", "_create_url(self): \"\"\" Internal method that returns the switches' URLs given the cfg attributes.", "= method def _create_url(self): \"\"\" Internal method that returns the switches' URLs given", "file except in compliance with the License. # You may obtain a copy", "attributes. \"\"\" return self.method + '://' + self.host + '/System/File/file_config.html' def send_xml(self, xml_content):", "being used url = self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file:", "limitations under the License. 
\"\"\" RPC class used to communicate with the hardware", "given xml file to the switches \"\"\" # set url being used url", "from requests_toolbelt import MultipartEncoder import requests #import pdb class RPC: \"\"\" RPC class.", "to the client and pass the XML files. \"\"\" def __init__(self, username, password,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self.host = host self.method = method def _create_url(self): \"\"\" Internal method that returns", "#import pdb class RPC: \"\"\" RPC class. Used to connect to the client", "License for the specific language governing permissions and # limitations under the License.", "CONDITIONS OF ANY KIND, either express or # implied. # See the License", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "OF ANY KIND, either express or # implied. # See the License for", "import gzip from StringIO import StringIO as sio import os from requests_toolbelt import", "= host self.method = method def _create_url(self): \"\"\" Internal method that returns the", "either express or # implied. # See the License for the specific language", "method that returns the switches' URLs given the cfg attributes. \"\"\" return self.method", "governing permissions and # limitations under the License. \"\"\" RPC class used to", "the switches' URLs given the cfg attributes. \"\"\" return self.method + '://' +", "the License. # You may obtain a copy of the License at #", "__init__(self, username, password, host, method): self.username = username self.password = password self.host =", "the License. \"\"\" RPC class used to communicate with the hardware \"\"\" import", "requests #import pdb class RPC: \"\"\" RPC class. Used to connect to the", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "import requests #import pdb class RPC: \"\"\" RPC class. 
Used to connect to", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "a given xml file to the switches \"\"\" # set url being used", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "used to communicate with the hardware \"\"\" import gzip from StringIO import StringIO", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied.", "switches \"\"\" # set url being used url = self._create_url() ziped = sio()", "hardware \"\"\" import gzip from StringIO import StringIO as sio import os from", "= sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() #", "StringIO import StringIO as sio import os from requests_toolbelt import MultipartEncoder import requests", "gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'), ('running_part',", "applicable law or agreed to in writing, software # distributed under the License", "import os from requests_toolbelt import MultipartEncoder import requests #import pdb class RPC: \"\"\"", "Internal method that returns the switches' URLs given the cfg attributes. \"\"\" return", "def send_xml(self, xml_content): \"\"\" Method used to send a given xml file to", "XML files. 
\"\"\" def __init__(self, username, password, host, method): self.username = username self.password", "\"\"\" Method used to send a given xml file to the switches \"\"\"", "(c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0", "with the hardware \"\"\" import gzip from StringIO import StringIO as sio import", "self.password = password self.host = host self.method = method def _create_url(self): \"\"\" Internal", "pdb.set_trace() fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m =", "or agreed to in writing, software # distributed under the License is distributed", "mode='w') as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page',", "Foundation # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See", "class RPC: \"\"\" RPC class. Used to connect to the client and pass", "requests_toolbelt import MultipartEncoder import requests #import pdb class RPC: \"\"\" RPC class. Used", "= self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data =", "to communicate with the hardware \"\"\" import gzip from StringIO import StringIO as", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "files. \"\"\" def __init__(self, username, password, host, method): self.username = username self.password =", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "\"\"\" Internal method that returns the switches' URLs given the cfg attributes. \"\"\"", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "'://' + self.host + '/System/File/file_config.html' def send_xml(self, xml_content): \"\"\" Method used to send", "# implied. 
# See the License for the specific language governing permissions and", "m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r = requests.post(url=url, data=m, auth=(self.username, self.password), headers={'Content-type': m.content_type}, verify=False)", "password, host, method): self.username = username self.password = password self.host = host self.method", "with the License. # You may obtain a copy of the License at", "as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields = (('page', 'file_upload'),", "fields = (('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields,", "and pass the XML files. \"\"\" def __init__(self, username, password, host, method): self.username", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "method): self.username = username self.password = password self.host = host self.method = method", "used to send a given xml file to the switches \"\"\" # set", "in writing, software # distributed under the License is distributed on an \"AS", "2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the", "as sio import os from requests_toolbelt import MultipartEncoder import requests #import pdb class", "(('page', 'file_upload'), ('running_part', '1'), ('file_to_upload', ('file_to_upload', run_data, 'application/octet-stream'))) m = MultipartEncoder(fields=fields, boundary='-----boundary-----') r", "License. 
\"\"\" RPC class used to communicate with the hardware \"\"\" import gzip", "url being used url = self._create_url() ziped = sio() with gzip.GzipFile(fileobj=ziped, mode='w') as", "with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file: gzip_file.write(xml_content) run_data = ziped.getvalue() ziped.close() # pdb.set_trace() fields", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "@debug_transformer def walkout(): print('Bye Felical') @debug_transformer def get_bob(): return 'Bob' bob = get_bob()", "`{func.__name__}` called') ret = func() print(f'Function `{func.__name__}` finished') return ret return wrapper @debug_transformer", "<reponame>zeroam/TIL def debug_transformer(func): def wrapper(): print(f'Function `{func.__name__}` called') ret = func() print(f'Function `{func.__name__}`", "debug_transformer(func): def wrapper(): print(f'Function `{func.__name__}` called') ret = func() print(f'Function `{func.__name__}` finished') return", "print(f'Function `{func.__name__}` finished') return ret return wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer", "print(f'Function `{func.__name__}` called') ret = func() print(f'Function `{func.__name__}` finished') return ret return wrapper", "def wrapper(): print(f'Function `{func.__name__}` called') ret = func() print(f'Function `{func.__name__}` finished') return ret", "= func() print(f'Function `{func.__name__}` finished') return ret return wrapper @debug_transformer def walkout(): print('Bye", "finished') return ret return wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer def get_bob():", "def debug_transformer(func): def wrapper(): print(f'Function `{func.__name__}` called') ret = func() print(f'Function `{func.__name__}` finished')", "ret = func() print(f'Function `{func.__name__}` finished') return ret return wrapper @debug_transformer def walkout():", "ret return wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer def get_bob(): return 'Bob'", "return wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer def get_bob(): return 'Bob' bob", "wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer def get_bob(): return 'Bob' bob =", "return ret return wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer def get_bob(): 
return", "`{func.__name__}` finished') return ret return wrapper @debug_transformer def walkout(): print('Bye Felical') @debug_transformer def", "def walkout(): print('Bye Felical') @debug_transformer def get_bob(): return 'Bob' bob = get_bob() print(bob)", "wrapper(): print(f'Function `{func.__name__}` called') ret = func() print(f'Function `{func.__name__}` finished') return ret return", "called') ret = func() print(f'Function `{func.__name__}` finished') return ret return wrapper @debug_transformer def", "func() print(f'Function `{func.__name__}` finished') return ret return wrapper @debug_transformer def walkout(): print('Bye Felical')" ]
[ "maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId: viewId", "templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\" + templateName , renderContext) else: return renderContext", "= geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\"", "viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\" + templateName ,", "-*- from django.shortcuts import render from django.contrib.auth.decorators import login_required from geoprisma import views", "geoprisma import views as geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName')", "views as geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId =", "= kwargs.get('viewId') if not viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId)", "wsName, viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\" + templateName", "from django.contrib.auth.decorators import login_required from geoprisma import views as geoprisma_views @login_required def maprender(request,", "viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName", "render from django.contrib.auth.decorators import login_required from geoprisma import views as geoprisma_views @login_required def", "# -*- coding: utf-8 -*- from django.shortcuts import render from django.contrib.auth.decorators import login_required", "renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") 
return render(request,", "def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId:", "dict): templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\" + templateName , renderContext) else: return", "if not viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext,", "django.contrib.auth.decorators import login_required from geoprisma import views as geoprisma_views @login_required def maprender(request, *args,", "django.shortcuts import render from django.contrib.auth.decorators import login_required from geoprisma import views as geoprisma_views", "utf-8 -*- from django.shortcuts import render from django.contrib.auth.decorators import login_required from geoprisma import", "**kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId: viewId = \"\"", "coding: utf-8 -*- from django.shortcuts import render from django.contrib.auth.decorators import login_required from geoprisma", "from geoprisma import views as geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName =", "login_required from geoprisma import views as geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName", "from django.shortcuts import render from django.contrib.auth.decorators import login_required from geoprisma import views as", "as geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId')", "kwargs.get('viewId') if not viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if", "= \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\")", "if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return render(request, 
\"example_project/\" + templateName , renderContext)", "kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request,", "not viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict):", "viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName =", "\"\" renderContext = geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return", "geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if", "isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\" + templateName , renderContext) else:", "-*- coding: utf-8 -*- from django.shortcuts import render from django.contrib.auth.decorators import login_required from", "@login_required def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if not", "import login_required from geoprisma import views as geoprisma_views @login_required def maprender(request, *args, **kwargs):", "import render from django.contrib.auth.decorators import login_required from geoprisma import views as geoprisma_views @login_required", "import views as geoprisma_views @login_required def maprender(request, *args, **kwargs): wsName = kwargs.get('wsName') viewId", "*args, **kwargs): wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId: viewId =", "viewId = kwargs.get('viewId') if not viewId: viewId = \"\" renderContext = geoprisma_views.maprender(request, wsName,", "geoprisma_views.maprender(request, wsName, viewId) if isinstance(renderContext, dict): templateName = renderContext.get(\"templateName\") return render(request, \"example_project/\" +", "= 
kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId: viewId = \"\" renderContext =", "wsName = kwargs.get('wsName') viewId = kwargs.get('viewId') if not viewId: viewId = \"\" renderContext" ]
[ "ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath step", "m = event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z, v, l, m) #", "_model.processReadHits() def readCells(): global _model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist():", "readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global _model", "CHALLENGE MODEL ===================== ######################################################################## # Author: <NAME> # Date: Dec. 2018 from __future__", "ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model =", "_model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int * ncells array_type2 = ctypes.c_int", "_model.processSortTracks() def finish(): global _model _model.processFinish() class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__))", "array_type1 = ctypes.c_double * nhits array_type2 = ctypes.c_double * nhits array_type3 = ctypes.c_double", "import print_function from __future__ import division from __future__ import absolute_import from __future__ import", "ctypes.c_int * nhits array_type6 = ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value):", "= os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): # Instantiate the tracker 
setup(event_id,self.datapath) #", "os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): # Instantiate the tracker setup(event_id,self.datapath) # Read", "l, m) # Read cells data hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1", "global _workPath step = 1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth():", "event.y.values z = event.z.values v = event.volume_id.values l = event.layer_id.values m = event.module_id.values", "def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int * ncells array_type2 = ctypes.c_int *", "def readCells(): global _model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global", "global _model _model.processReadStarts() def readHits(): global _model _model.processReadHits() def readCells(): global _model _model.processReadCells()", "* nhits array_type3 = ctypes.c_double * nhits array_type4 = ctypes.c_int * nhits array_type5", "_model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def", "global _model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist()", "findTracks() # Delete the tracker finish() # Read the submission file filename =", "def setup(number,datapath): global _model global _workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string)", "= event.y.values z = event.z.values v = event.volume_id.values l = event.layer_id.values m =", "_model global _workPath step = 1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def", "global _model array_type1 = ctypes.c_int * ncells array_type2 = 
ctypes.c_int * ncells array_type3", "cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values value = cells.value.values ncells = cells.shape[0]", "global _model global _workPath step = 1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string)", "ch0 = cells.ch0.values ch1 = cells.ch1.values value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value)", "= ctypes.c_double * nhits array_type3 = ctypes.c_double * nhits array_type4 = ctypes.c_int *", "====================== TrackML CHALLENGE MODEL ===================== ######################################################################## # Author: <NAME> # Date: Dec. 2018", "_workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1", "_dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath)", "global _model _model.processReadHits() def readCells(): global _model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist()", "z, v, l, m) # Read cells data hit_id = cells.hit_id.values ch0 =", "setup(number,datapath): global _model global _workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def", "# Run the traking code findTracks() # Delete the tracker finish() # Read", "<filename>model.py ######################################################################## # ====================== TrackML CHALLENGE MODEL ===================== ######################################################################## # Author: <NAME> #", "_model.processFinish() class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = 
os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self,", "cells data hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values value =", "code findTracks() # Delete the tracker finish() # Read the submission file filename", "d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double * nhits", "os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' +", "= os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): # Instantiate the", "initHits(nhits,x, y, z, v, l, m) # Read cells data hit_id = cells.hit_id.values", "tracker setup(event_id,self.datapath) # Read hits data x = event.x.values y = event.y.values z", "unicode_literals import numpy as np import pandas as pd import os import ctypes", "= ctypes.c_int * nhits array_type6 = ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def", "= ctypes.c_int * nhits array_type5 = ctypes.c_int * nhits array_type6 = ctypes.c_int *", "nhits array_type6 = ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model", "event.layer_id.values m = event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z, v, l, m)", "Delete the tracker finish() # Read the submission file filename = self.workpath+'/submission'+str(event_id)+'.csv' sub", "def findTracks(): global _model global _workPath step = 1 c_string = _workPath.encode('utf-8') result", "readStarts(): global _model 
_model.processReadStarts() def readHits(): global _model _model.processReadHits() def readCells(): global _model", "__init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): #", "y = event.y.values z = event.z.values v = event.volume_id.values l = event.layer_id.values m", "as pd import os import ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so')", "_model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double * nhits array_type2 = ctypes.c_double", "data hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values value = cells.value.values", "array_type4 = ctypes.c_int * nhits array_type5 = ctypes.c_int * nhits array_type6 = ctypes.c_int", "event.x.values y = event.y.values z = event.z.values v = event.volume_id.values l = event.layer_id.values", "= cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values value = cells.value.values ncells =", "Dec. 
2018 from __future__ import print_function from __future__ import division from __future__ import", "import unicode_literals import numpy as np import pandas as pd import os import", "= ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 =", "os import ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath)", "import os import ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model =", "event.shape[0] initHits(nhits,x, y, z, v, l, m) # Read cells data hit_id =", "def readTruth(): global _model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def readHits(): global", "__future__ import unicode_literals import numpy as np import pandas as pd import os", "the tracker setup(event_id,self.datapath) # Read hits data x = event.x.values y = event.y.values", "ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int", "numpy as np import pandas as pd import os import ctypes _dllPath =", "as np import pandas as pd import os import ctypes _dllPath = os.path.dirname(__file__)", "ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath step = 1", "tracker finish() # Read the submission file filename = self.workpath+'/submission'+str(event_id)+'.csv' sub = pd.read_csv(filename);", "ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + 
_workPath) _model.processInitHits.argtypes = (ctypes.c_int,", "= event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z, v, l, m) # Read", "_model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks():", "= ctypes.c_double * nhits array_type2 = ctypes.c_double * nhits array_type3 = ctypes.c_double *", "import pandas as pd import os import ctypes _dllPath = os.path.dirname(__file__) libPath =", "nhits array_type3 = ctypes.c_double * nhits array_type4 = ctypes.c_int * nhits array_type5 =", "= ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath", "# Read cells data hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values", "Date: Dec. 2018 from __future__ import print_function from __future__ import division from __future__", "_workPath step = 1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global", "_model array_type1 = ctypes.c_double * nhits array_type2 = ctypes.c_double * nhits array_type3 =", "division from __future__ import absolute_import from __future__ import unicode_literals import numpy as np", "array_type6 = ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1", "setup(event_id,self.datapath) # Read hits data x = event.x.values y = event.y.values z =", "__future__ import absolute_import from __future__ import unicode_literals import numpy as np import pandas", "# Date: Dec. 
2018 from __future__ import print_function from __future__ import division from", "'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath)", "= ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes =", "= os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes", "initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double * nhits array_type2 = ctypes.c_double * nhits", "nhits array_type5 = ctypes.c_int * nhits array_type6 = ctypes.c_int * nhits result =", "global _model global _workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m):", "from __future__ import division from __future__ import absolute_import from __future__ import unicode_literals import", "ctypes.c_int * ncells array_type2 = ctypes.c_int * ncells array_type3 = ctypes.c_int * ncells", "_model _model.processReadStarts() def readHits(): global _model _model.processReadHits() def readCells(): global _model _model.processReadCells() def", "predict_one_event(self, event_id, event, cells): # Instantiate the tracker setup(event_id,self.datapath) # Read hits data", "# Read hits data x = event.x.values y = event.y.values z = event.z.values", "Read hits data x = event.x.values y = event.y.values z 
= event.z.values v", "array_type3 = ctypes.c_int * ncells array_type4 = ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value))", "Instantiate the tracker setup(event_id,self.datapath) # Read hits data x = event.x.values y =", "v, l, m) # Read cells data hit_id = cells.hit_id.values ch0 = cells.ch0.values", "* ncells array_type2 = ctypes.c_int * ncells array_type3 = ctypes.c_int * ncells array_type4", "ctypes.c_int * ncells array_type4 = ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks():", "= cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks() # Delete the tracker", "(ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8')", "_workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath):", "step = 1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model", "cells.ch0.values ch1 = cells.ch1.values value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run", "from __future__ import print_function from __future__ import division from __future__ import absolute_import from", "event, cells): # Instantiate the tracker 
setup(event_id,self.datapath) # Read hits data x =", "v = event.volume_id.values l = event.layer_id.values m = event.module_id.values nhits = event.shape[0] initHits(nhits,x,", "array_type3 = ctypes.c_double * nhits array_type4 = ctypes.c_int * nhits array_type5 = ctypes.c_int", "ch1 = cells.ch1.values value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the", "the traking code findTracks() # Delete the tracker finish() # Read the submission", "* ncells array_type3 = ctypes.c_int * ncells array_type4 = ctypes.c_double * ncells result", "_model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def finish():", "array_type5 = ctypes.c_int * nhits array_type6 = ctypes.c_int * nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m))", "initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int * ncells array_type2 = ctypes.c_int * ncells", "_model global _workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global", "global _model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks()", "import numpy as np import pandas as pd import os import ctypes _dllPath", "array_type4 = ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global", "os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): # Instantiate the tracker", "= 
(ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath c_string =", "######################################################################## # ====================== TrackML CHALLENGE MODEL ===================== ######################################################################## # Author: <NAME> # Date:", "ncells array_type4 = ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model", "# Author: <NAME> # Date: Dec. 2018 from __future__ import print_function from __future__", "_model _model.processSortTracks() def finish(): global _model _model.processFinish() class Model: def __init__(self): self.workpath =", "the tracker finish() # Read the submission file filename = self.workpath+'/submission'+str(event_id)+'.csv' sub =", "= cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks() #", "absolute_import from __future__ import unicode_literals import numpy as np import pandas as pd", "def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global", "def sortTracks(): global _model _model.processSortTracks() def finish(): global _model _model.processFinish() class Model: def", "_model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def readHits():", "libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__))", "result = 
_model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts()", "class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id,", "_workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double *", "ctypes.c_double * nhits array_type3 = ctypes.c_double * nhits array_type4 = ctypes.c_int * nhits", "_model _model.processReadBlacklist() def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def", "= datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double * nhits array_type2", "cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks() # Delete the tracker finish()", "initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks() # Delete the tracker finish() #", "TrackML CHALLENGE MODEL ===================== ######################################################################## # Author: <NAME> # Date: Dec. 
2018 from", "def readHits(): global _model _model.processReadHits() def readCells(): global _model _model.processReadCells() def readBlacklist(): global", "ncells array_type2 = ctypes.c_int * ncells array_type3 = ctypes.c_int * ncells array_type4 =", "+ _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def", "nhits array_type4 = ctypes.c_int * nhits array_type5 = ctypes.c_int * nhits array_type6 =", "_model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def finish(): global _model _model.processFinish() class", "def finish(): global _model _model.processFinish() class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath", "######################################################################## # Author: <NAME> # Date: Dec. 
2018 from __future__ import print_function from", "_workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double))", "cells): # Instantiate the tracker setup(event_id,self.datapath) # Read hits data x = event.x.values", "ncells array_type3 = ctypes.c_int * ncells array_type4 = ctypes.c_double * ncells result =", "# Delete the tracker finish() # Read the submission file filename = self.workpath+'/submission'+str(event_id)+'.csv'", "nhits = event.shape[0] initHits(nhits,x, y, z, v, l, m) # Read cells data", "result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath step = 1 c_string", "MODEL ===================== ######################################################################## # Author: <NAME> # Date: Dec. 
2018 from __future__ import", "_model _model.processReadHits() def readCells(): global _model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def", "= _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def readStarts(): global", "Read cells data hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values value", "# ====================== TrackML CHALLENGE MODEL ===================== ######################################################################## # Author: <NAME> # Date: Dec.", "global _model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def readHits(): global _model _model.processReadHits()", "= event.shape[0] initHits(nhits,x, y, z, v, l, m) # Read cells data hit_id", "= (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model", "event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z, v, l, m) # Read cells", "_model _model.processFinish() class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def", "cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks() # Delete", "ctypes.c_double * nhits array_type4 = ctypes.c_int * nhits array_type5 = ctypes.c_int * nhits", "np import pandas as pd import os import ctypes _dllPath = os.path.dirname(__file__) libPath", "(ctypes.c_int, 
ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global", "_model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def readHits(): global _model _model.processReadHits() def", "import ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model", "= event.x.values y = event.y.values z = event.z.values v = event.volume_id.values l =", "event.z.values v = event.volume_id.values l = event.layer_id.values m = event.module_id.values nhits = event.shape[0]", "= cells.ch0.values ch1 = cells.ch1.values value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) #", "event_id, event, cells): # Instantiate the tracker setup(event_id,self.datapath) # Read hits data x", "= ctypes.c_int * ncells array_type3 = ctypes.c_int * ncells array_type4 = ctypes.c_double *", "__future__ import print_function from __future__ import division from __future__ import absolute_import from __future__", "= _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def", "self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): # Instantiate", "= os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) 
print('model.py:'", "traking code findTracks() # Delete the tracker finish() # Read the submission file", "= ctypes.c_int * ncells array_type2 = ctypes.c_int * ncells array_type3 = ctypes.c_int *", "os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath =", "= _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double", "findTracks(): global _model global _workPath step = 1 c_string = _workPath.encode('utf-8') result =", "m) # Read cells data hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1 =", "import division from __future__ import absolute_import from __future__ import unicode_literals import numpy as", "def readStarts(): global _model _model.processReadStarts() def readHits(): global _model _model.processReadHits() def readCells(): global", "<NAME> # Date: Dec. 
2018 from __future__ import print_function from __future__ import division", "= event.layer_id.values m = event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z, v, l,", "pandas as pd import os import ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath,", "* ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath step =", "= ctypes.c_double * nhits array_type4 = ctypes.c_int * nhits array_type5 = ctypes.c_int *", "def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells):", "hit_id = cells.hit_id.values ch0 = cells.ch0.values ch1 = cells.ch1.values value = cells.value.values ncells", "print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p)", "_model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global", "* nhits array_type6 = ctypes.c_int * nhits result = 
_model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global", "Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event,", "= cells.ch1.values value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking", "sortTracks(): global _model _model.processSortTracks() def finish(): global _model _model.processFinish() class Model: def __init__(self):", "#_model = ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int))", "global _model _model.processSortTracks() def finish(): global _model _model.processFinish() class Model: def __init__(self): self.workpath", "ctypes.c_double * nhits array_type2 = ctypes.c_double * nhits array_type3 = ctypes.c_double * nhits", "global _workPath c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model", "_model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath c_string", "array_type2 = ctypes.c_double * nhits array_type3 = ctypes.c_double * nhits array_type4 = ctypes.c_int", "* ncells array_type4 = ctypes.c_double * ncells result = 
_model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global", "_model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath step = 1 c_string = _workPath.encode('utf-8')", "readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def finish(): global _model", "_model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath c_string = _workPath.encode('utf-8') d_string", "2018 from __future__ import print_function from __future__ import division from __future__ import absolute_import", "global _model array_type1 = ctypes.c_double * nhits array_type2 = ctypes.c_double * nhits array_type3", "# Read the submission file filename = self.workpath+'/submission'+str(event_id)+'.csv' sub = pd.read_csv(filename); return sub", "ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes =", "def predict_one_event(self, event_id, event, cells): # Instantiate the tracker setup(event_id,self.datapath) # Read hits", "_model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def finish(): global _model _model.processFinish() class Model:", "os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, 
ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes =", "x = event.x.values y = event.y.values z = event.z.values v = event.volume_id.values l", "= (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath c_string = _workPath.encode('utf-8') d_string =", "= os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath", "= 1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth()", "datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double * nhits array_type2 =", "global _model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def finish(): global _model _model.processFinish()", "def readWhitelist(): global _model _model.processReadWhitelist() def sortTracks(): global _model _model.processSortTracks() def finish(): global", "print_function from __future__ import division from __future__ import absolute_import from __future__ import unicode_literals", "finish(): global _model _model.processFinish() class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath =", "= _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def findTracks(): global _model global _workPath step = 1 c_string =", "= event.z.values v = event.volume_id.values l = 
event.layer_id.values m = event.module_id.values nhits =", "self.datapath = os.path.dirname(os.path.realpath(__file__)) def predict_one_event(self, event_id, event, cells): # Instantiate the tracker setup(event_id,self.datapath)", "finish() # Read the submission file filename = self.workpath+'/submission'+str(event_id)+'.csv' sub = pd.read_csv(filename); return", "_workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def readStarts(): global _model", "pd import os import ctypes _dllPath = os.path.dirname(__file__) libPath = os.path.join(_dllPath, 'libmodel.so') _model", "y, z, v, l, m) # Read cells data hit_id = cells.hit_id.values ch0", "array_type1 = ctypes.c_int * ncells array_type2 = ctypes.c_int * ncells array_type3 = ctypes.c_int", "ctypes.c_int * nhits array_type5 = ctypes.c_int * nhits array_type6 = ctypes.c_int * nhits", "hits data x = event.x.values y = event.y.values z = event.z.values v =", "c_string = _workPath.encode('utf-8') d_string = datapath.encode('utf-8') _model.processSetup(ctypes.c_int(number),d_string,c_string) def initHits(nhits,x,y,z,v,l,m): global _model array_type1 =", "* nhits array_type2 = ctypes.c_double * nhits array_type3 = ctypes.c_double * nhits array_type4", "readCells(): global _model _model.processReadCells() def readBlacklist(): global _model _model.processReadBlacklist() def readWhitelist(): global _model", "1 c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def", "# Instantiate the tracker setup(event_id,self.datapath) # Read hits data x = event.x.values y", "= event.volume_id.values l = event.layer_id.values m = event.module_id.values nhits = event.shape[0] initHits(nhits,x, y,", "* nhits result = 
_model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int *", "cells.ch1.values value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code", "from __future__ import absolute_import from __future__ import unicode_literals import numpy as np import", "= _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int * ncells array_type2 =", "_model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def readHits(): global _model _model.processReadHits() def readCells():", "_model array_type1 = ctypes.c_int * ncells array_type2 = ctypes.c_int * ncells array_type3 =", "c_string = _workPath.encode('utf-8') result = _model.processFindTracks(ctypes.c_int(step),c_string) def readTruth(): global _model _model.processReadTruth() def readStarts():", "Run the traking code findTracks() # Delete the tracker finish() # Read the", "global _model _model.processFinish() class Model: def __init__(self): self.workpath = os.path.dirname(os.path.realpath(__file__)) self.datapath = os.path.dirname(os.path.realpath(__file__))", "ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks() # Delete the", "import absolute_import from __future__ import unicode_literals import numpy as np import pandas as", "* nhits array_type5 = ctypes.c_int * nhits array_type6 = ctypes.c_int * nhits result", "nhits result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int * ncells", "nhits 
array_type2 = ctypes.c_double * nhits array_type3 = ctypes.c_double * nhits array_type4 =", "===================== ######################################################################## # Author: <NAME> # Date: Dec. 2018 from __future__ import print_function", "l = event.layer_id.values m = event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z, v,", "Author: <NAME> # Date: Dec. 2018 from __future__ import print_function from __future__ import", "= ctypes.CDLL(libPath) _workPath = os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes", "_model.processReadStarts() def readHits(): global _model _model.processReadHits() def readCells(): global _model _model.processReadCells() def readBlacklist():", "readHits(): global _model _model.processReadHits() def readCells(): global _model _model.processReadCells() def readBlacklist(): global _model", "result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m)) def initCells(ncells,hit_id,ch0,ch1,value): global _model array_type1 = ctypes.c_int * ncells array_type2", "readTruth(): global _model _model.processReadTruth() def readStarts(): global _model _model.processReadStarts() def readHits(): global _model", "z = event.z.values v = event.volume_id.values l = event.layer_id.values m = event.module_id.values nhits", "value = cells.value.values ncells = cells.shape[0] initCells(ncells,hit_id,ch0,ch1,value) # Run the traking code findTracks()", "ctypes.c_int * ncells array_type3 = ctypes.c_int * ncells array_type4 = ctypes.c_double * ncells", "_model = ctypes.cdll.LoadLibrary(libPath) #_model = ctypes.CDLL(libPath) _workPath = 
os.path.dirname(os.path.realpath(__file__)) print('model.py:' + _workPath) _model.processInitHits.argtypes", "= ctypes.c_int * ncells array_type4 = ctypes.c_double * ncells result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value)) def", "def initHits(nhits,x,y,z,v,l,m): global _model array_type1 = ctypes.c_double * nhits array_type2 = ctypes.c_double *", "array_type2 = ctypes.c_int * ncells array_type3 = ctypes.c_int * ncells array_type4 = ctypes.c_double", "__future__ import division from __future__ import absolute_import from __future__ import unicode_literals import numpy", "ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int)) _model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath", "data x = event.x.values y = event.y.values z = event.z.values v = event.volume_id.values", "event.volume_id.values l = event.layer_id.values m = event.module_id.values nhits = event.shape[0] initHits(nhits,x, y, z,", "from __future__ import unicode_literals import numpy as np import pandas as pd import", "* nhits array_type4 = ctypes.c_int * nhits array_type5 = ctypes.c_int * nhits array_type6", "(ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double)) _model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p) def setup(number,datapath): global _model global _workPath c_string = _workPath.encode('utf-8')" ]
[ "disclaimer. # - Redistributions in binary form must reproduce the above copyright #", "# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A", "LocMgr.ensure_dir_exists(self.op_dir) def has_option_set(self, key): return key in self.global_options def get_option(self, key): return self.global_options[key]", "ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #", "2012 <NAME>. # All rights reserved. # # Redistribution and use in source", "= MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False,", "color LUT: bi_dict = m.region_number_to_name_bidict rgn_colors = {} for rgn in m.get_regions(): rgn_name", "All rights reserved. # # Redistribution and use in source and binary forms,", "f: f.write(ply) class ColorDef(object): def __init__(self, r, g, b): self.r = r self.g", "None for f in self.op_files: self.dst_zip_file.write(f) self.op_files = [] def getFileObjRead(self, filename): possible_filenames", "= bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int) print '%s -> %s' % (rgn_name, rgn_int), rgn_color", "rgn, color_def): assert isinstance(rgn, int) assert isinstance(color_def, ColorDef) self.rgn = rgn self.color_def =", "for m in morphs: m = m.to_tree() # Create the color LUT: bi_dict", "OF SUCH DAMAGE. 
# ---------------------------------------------------------------------- import os from morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh", "id in self.color_aliases self.color_aliases[id] = color def set_default_region_color(self, rgn_id, color): self.region_color_defaults[rgn_id] = color", "import LocMgr from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter import", "bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int) print '%s -> %s' % (rgn_name, rgn_int), rgn_color rgn_colors[rgn_name]", "AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter import MorphologyMinimumDiameter class MeshGenerationOptions: minimum_diameter =", "region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region, color): self.region_colors[region] = color def finalise(self, plyfilename): m", "include_file(self, filename, options): src_obj = self.global_scope.getFileObjRead(filename) morphs = NewSWCLoader.load_swc_set(src=src_obj) # Hack: only first:", "key): return key in self.global_options def get_option(self, key): return self.global_options[key] def get_color(self, alias):", "= NewSWCLoader.load_swc_set(src=src_obj) # Hack: only first: # morphs = [morphs[0]] for m in", "for f in self.op_files: self.dst_zip_file.write(f) self.op_files = [] def getFileObjRead(self, filename): possible_filenames =", "LocMgr from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter import MorphologyMinimumDiameter", "morphforge.morphology.mesh.writer_ply import 
MeshWriterPLY from morphforge.morphology.mesh.mesh import TriangleMesh from morphforge.core.mgrs.locmgr import LocMgr from morphforgecontrib.morphology.util.axontrimmer", "self.op_files = [] def getFileObjRead(self, filename): possible_filenames = [filename, 'src/' + filename] for", "IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR", "None in self.region_colors: return self.region_colors[None] # Global colors?: if rgn in self.global_scope.region_color_defaults: return", "if 'trim' in options: m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset' in options: m", "provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE", "'MinimumDiameter' class Context(object): def __init__(self, src_zip_file, dst_zip_file): self.color_aliases = {} self.region_color_defaults = {}", "rgn_color # Check for ignored Region: if None in rgn_colors.values(): for v in", "% rgn # return ColorDef(200,50, np.min((rgn*20,255))) def include_file(self, filename, options): src_obj = self.global_scope.getFileObjRead(filename)", "dst_zip_file): self.color_aliases = {} self.region_color_defaults = {} self.currentplyscope = None self.global_options = {", "{} self.region_color_defaults = {} self.currentplyscope = None self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0 }", "filename) d = os.path.dirname(filename) if not os.path.exists(d): os.makedirs(d) self.op_files.append(filename) return open(filename, 'w') class", "False, 'What do I do with region: %d ' % rgn # return", "--------------------------------------------------------------------- # Copyright (c) 2012 <NAME>. # All rights reserved. 
# # Redistribution", "IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY", "{} for rgn in m.get_regions(): rgn_name = rgn.name rgn_int = bi_dict.region_name_to_int(rgn_name) rgn_color =", "color): assert not id in self.color_aliases self.color_aliases[id] = color def set_default_region_color(self, rgn_id, color):", "for rgn in m.get_regions(): rgn_name = rgn.name rgn_int = bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int)", "self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope = None for f in self.op_files: self.dst_zip_file.write(f) self.op_files = [] def", "if None in rgn_colors.values(): for v in rgn_colors.values(): if v is not None:", "MorphologyMinimumDiameter class MeshGenerationOptions: minimum_diameter = 'MinimumDiameter' class Context(object): def __init__(self, src_zip_file, dst_zip_file): self.color_aliases", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE", "NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from morphforge.morphology.mesh.mesh import TriangleMesh", "filename = os.path.join(self.op_dir, filename) d = os.path.dirname(filename) if not os.path.exists(d): os.makedirs(d) self.op_files.append(filename) return", "# --------------------------------------------------------------------- # Copyright (c) 2012 <NAME>. # All rights reserved. 
# #", "def add_alias(self, id, color): assert not id in self.color_aliases self.color_aliases[id] = color def", "if 'offset' in options: m = MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m,", "print '%s -> %s' % (rgn_name, rgn_int), rgn_color rgn_colors[rgn_name] = rgn_color # Check", "MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region, color): self.region_colors[region] = color def finalise(self,", "return open(filename, 'w') class PlyScope(object): def __init__(self, global_scope): self.global_scope = global_scope self.region_colors =", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR", "with region: %d ' % rgn # return ColorDef(200,50, np.min((rgn*20,255))) def include_file(self, filename,", "MeshGenerationOptions: minimum_diameter = 'MinimumDiameter' class Context(object): def __init__(self, src_zip_file, dst_zip_file): self.color_aliases = {}", "= os.path.dirname(filename) if not os.path.exists(d): os.makedirs(d) self.op_files.append(filename) return open(filename, 'w') class PlyScope(object): def", "THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED", "NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ----------------------------------------------------------------------", "from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter import MorphologyMinimumDiameter class MeshGenerationOptions: minimum_diameter = 'MinimumDiameter'", "g, b): self.r = r self.g = g self.b = b def __str__(self):", "the color LUT: bi_dict = m.region_number_to_name_bidict rgn_colors = {} for rgn in m.get_regions():", "# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------- import os", "ignored Region: if None in rgn_colors.values(): for v in rgn_colors.values(): if v is", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED.", "alias): return self.color_aliases[alias] def add_alias(self, id, color): assert not id in self.color_aliases self.color_aliases[id]", "v is not None: print 'Partly ignoring Structure:', for (k, v) in rgn_colors.iteritems():", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN", "return self.region_colors[None] # Global colors?: if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None", "from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from morphforge.morphology.mesh.mesh import TriangleMesh from", "= r self.g = g self.b = b def __str__(self): return '<ColorDef: (%d,%d,%d)>'", "# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #", "the options: if 'trim' in options: m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset' in", "<filename>src/morphforgecontrib/indev/meshtools/core.py #!/usr/bin/python # -*- coding: utf-8 -*- # 
--------------------------------------------------------------------- # Copyright (c) 2012", "'trim' in options: m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset' in options: m =", "= None self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0 } self.src_zip_file = src_zip_file self.dst_zip_file =", "NewSWCLoader.load_swc_set(src=src_obj) # Hack: only first: # morphs = [morphs[0]] for m in morphs:", "def get_option(self, key): return self.global_options[key] def get_color(self, alias): return self.color_aliases[alias] def add_alias(self, id,", "in self.region_colors: return self.region_colors[None] # Global colors?: if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn]", "or without # modification, are permitted provided that the following conditions # are", "from morphforgecontrib.morphology.util.minimum_diameter import MorphologyMinimumDiameter class MeshGenerationOptions: minimum_diameter = 'MinimumDiameter' class Context(object): def __init__(self,", "file: %s\" % filename) def getFileObjWrite(self, filename): filename = os.path.join(self.op_dir, filename) d =", "# ---------------------------------------------------------------------- import os from morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from", "self.region_colors: return self.region_colors[None] # Global colors?: if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if", "self.op_dir = '/tmp/mf/meshbuilder/' LocMgr.ensure_dir_exists(self.op_dir) def has_option_set(self, key): return key in self.global_options def get_option(self,", "False, 'Partly ignored structure!' 
continue # Apply the options: if 'trim' in options:", "MeshWriterPLY.build_string(m) with self.global_scope.getFileObjWrite(plyfilename) as f: f.write(ply) class ColorDef(object): def __init__(self, r, g, b):", "WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF", "m = TriangleMesh.merge(meshes=self.meshes) ply = MeshWriterPLY.build_string(m) with self.global_scope.getFileObjWrite(plyfilename) as f: f.write(ply) class ColorDef(object):", "= MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region, color): self.region_colors[region] = color def", "above copyright # notice, this list of conditions and the following disclaimer. #", "DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #", "NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #", "import TriangleMesh from morphforge.core.mgrs.locmgr import LocMgr from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import", "if rgn in self.region_colors: return self.region_colors[rgn] if None in self.region_colors: return self.region_colors[None] #", "rights reserved. 
# # Redistribution and use in source and binary forms, with", "MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region, color): self.region_colors[region]", "# are met: # # - Redistributions of source code must retain the", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER", "self.g, self.b) class RegionColorDef(object): def __init__(self, rgn, color_def): assert isinstance(rgn, int) assert isinstance(color_def,", "rgn_color rgn_colors[rgn_name] = rgn_color # Check for ignored Region: if None in rgn_colors.values():", "os.makedirs(d) self.op_files.append(filename) return open(filename, 'w') class PlyScope(object): def __init__(self, global_scope): self.global_scope = global_scope", "PlyScope(object): def __init__(self, global_scope): self.global_scope = global_scope self.region_colors = {} self.meshes = []", "# # - Redistributions of source code must retain the above copyright #", "Redistributions in binary form must reproduce the above copyright # notice, this list", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE,", "{} self.currentplyscope = None self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0 } self.src_zip_file = src_zip_file", "THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "# -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) 2012 <NAME>. #", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR", "rgn.name rgn_int = bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int) print '%s -> %s' % (rgn_name,", "r, g, b): self.r = r self.g = g self.b = b def", "# notice, this list of conditions and the following disclaimer. 
# - Redistributions", "# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE)", "with or without # modification, are permitted provided that the following conditions #", "os from morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY", "def finalise(self, plyfilename): m = TriangleMesh.merge(meshes=self.meshes) ply = MeshWriterPLY.build_string(m) with self.global_scope.getFileObjWrite(plyfilename) as f:", "in options: m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset' in options: m = MorphologyTranslator.translate(morphology=m,", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND", "do I do with region: %d ' % rgn # return ColorDef(200,50, np.min((rgn*20,255)))", "in self.region_colors: return self.region_colors[rgn] if None in self.region_colors: return self.region_colors[None] # Global colors?:", "src_zip_file, dst_zip_file): self.color_aliases = {} self.region_color_defaults = {} self.currentplyscope = None self.global_options =", "COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "rgn_colors.iteritems(): print k, v assert False, 'Partly ignored structure!' 
continue # Apply the", "def __init__(self, rgn, color_def): assert isinstance(rgn, int) assert isinstance(color_def, ColorDef) self.rgn = rgn", "TriangleMesh.merge(meshes=self.meshes) ply = MeshWriterPLY.build_string(m) with self.global_scope.getFileObjWrite(plyfilename) as f: f.write(ply) class ColorDef(object): def __init__(self,", "= global_scope self.region_colors = {} self.meshes = [] def get_region_color(self, rgn): assert isinstance(rgn,", "ColorDef(200,50, np.min((rgn*20,255))) def include_file(self, filename, options): src_obj = self.global_scope.getFileObjRead(filename) morphs = NewSWCLoader.load_swc_set(src=src_obj) #", "with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "Create the color LUT: bi_dict = m.region_number_to_name_bidict rgn_colors = {} for rgn in", "k, v assert False, 'Partly ignored structure!' continue # Apply the options: if", "return self.color_aliases[alias] def add_alias(self, id, color): assert not id in self.color_aliases self.color_aliases[id] =", "m.to_tree() # Create the color LUT: bi_dict = m.region_number_to_name_bidict rgn_colors = {} for", "conditions and the following disclaimer. # - Redistributions in binary form must reproduce", "if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[None] assert", "PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS", "code must retain the above copyright # notice, this list of conditions and", "ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "key in self.global_options def get_option(self, key): return self.global_options[key] def get_color(self, alias): return self.color_aliases[alias]", "get_option(self, key): return self.global_options[key] def get_color(self, alias): return self.color_aliases[alias] def add_alias(self, id, color):", "self.region_colors[rgn] if None in self.region_colors: return self.region_colors[None] # Global colors?: if rgn in", "min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region, color): self.region_colors[region] =", "SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #", "DAMAGE. # ---------------------------------------------------------------------- import os from morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR", "morphforge.core.mgrs.locmgr import LocMgr from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter", "colors? 
if rgn in self.region_colors: return self.region_colors[rgn] if None in self.region_colors: return self.region_colors[None]", "disclaimer in # the documentation and/or other materials provided with the # distribution.", "self.currentplyscope = None self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0 } self.src_zip_file = src_zip_file self.dst_zip_file", "def include_file(self, filename, options): src_obj = self.global_scope.getFileObjRead(filename) morphs = NewSWCLoader.load_swc_set(src=src_obj) # Hack: only", "is not None: print 'Partly ignoring Structure:', for (k, v) in rgn_colors.iteritems(): print", "MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors)", "f in self.op_files: self.dst_zip_file.write(f) self.op_files = [] def getFileObjRead(self, filename): possible_filenames = [filename,", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY", "rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[None] assert False,", "= {} self.region_color_defaults = {} self.currentplyscope = None self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0", "filename): possible_filenames = [filename, 'src/' + filename] for pf in possible_filenames: try: return", "# Apply the options: if 'trim' in options: m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if", "OF THE POSSIBILITY OF SUCH DAMAGE. 
# ---------------------------------------------------------------------- import os from morphforge.morphology.importer.import_array_swc import", "-*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) 2012 <NAME>. # All", "options: m = MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh =", "other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY,", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF", "if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def", "(k, v) in rgn_colors.iteritems(): print k, v assert False, 'Partly ignored structure!' 
continue", "AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "and use in source and binary forms, with or without # modification, are", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED", "= '/tmp/mf/meshbuilder/' LocMgr.ensure_dir_exists(self.op_dir) def has_option_set(self, key): return key in self.global_options def get_option(self, key):", "self.region_colors[None] # Global colors?: if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None in", "= rgn.name rgn_int = bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int) print '%s -> %s' %", "# Create the color LUT: bi_dict = m.region_number_to_name_bidict rgn_colors = {} for rgn", "self.b = b def __str__(self): return '<ColorDef: (%d,%d,%d)>' % (self.r, self.g, self.b) class", "for (k, v) in rgn_colors.iteritems(): print k, v assert False, 'Partly ignored structure!'", "self.region_color_defaults = {} self.currentplyscope = None self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0 } self.src_zip_file", "self.global_scope = global_scope self.region_colors = {} self.meshes = [] def get_region_color(self, rgn): assert", "CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "v) in rgn_colors.iteritems(): print k, v assert False, 'Partly ignored structure!' 
continue #", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES", "rgn # return ColorDef(200,50, np.min((rgn*20,255))) def include_file(self, filename, options): src_obj = self.global_scope.getFileObjRead(filename) morphs", "def __init__(self, r, g, b): self.r = r self.g = g self.b =", "b def __str__(self): return '<ColorDef: (%d,%d,%d)>' % (self.r, self.g, self.b) class RegionColorDef(object): def", "def __init__(self, global_scope): self.global_scope = global_scope self.region_colors = {} self.meshes = [] def", "self.region_colors[region] = color def finalise(self, plyfilename): m = TriangleMesh.merge(meshes=self.meshes) ply = MeshWriterPLY.build_string(m) with", "self.dst_zip_file = dst_zip_file self.op_files = [] self.op_dir = '/tmp/mf/meshbuilder/' LocMgr.ensure_dir_exists(self.op_dir) def has_option_set(self, key):", "return self.global_options[key] def get_color(self, alias): return self.color_aliases[alias] def add_alias(self, id, color): assert not", "EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "def get_color(self, alias): return self.color_aliases[alias] def add_alias(self, id, color): assert not id in", "the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "rgn_color = self.get_region_color(rgn_int) print '%s -> %s' % (rgn_name, rgn_int), rgn_color rgn_colors[rgn_name] =", "self.color_aliases[alias] def add_alias(self, id, color): assert not id in self.color_aliases self.color_aliases[id] = color", "self.g = g self.b = b def __str__(self): return '<ColorDef: (%d,%d,%d)>' % (self.r,", "<NAME>. # All rights reserved. 
# # Redistribution and use in source and", "(INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "- Redistributions in binary form must reproduce the above copyright # notice, this", "self.color_aliases self.color_aliases[id] = color def set_default_region_color(self, rgn_id, color): self.region_color_defaults[rgn_id] = color def new_ply_block(self):", "mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region, color): self.region_colors[region] = color", "+ filename] for pf in possible_filenames: try: return self.src_zip_file.open(pf, 'r') except KeyError: pass", "-*- # --------------------------------------------------------------------- # Copyright (c) 2012 <NAME>. # All rights reserved. #", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA,", "OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "add_alias(self, id, color): assert not id in self.color_aliases self.color_aliases[id] = color def set_default_region_color(self,", "met: # # - Redistributions of source code must retain the above copyright", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ---------------------------------------------------------------------- import os from", "= PlyScope(global_scope=self) def close_ply_block(self, plyfilename): self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope = None for f in self.op_files:", "d = os.path.dirname(filename) if not os.path.exists(d): os.makedirs(d) self.op_files.append(filename) return open(filename, 'w') class PlyScope(object):", "# Global colors?: if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None in self.global_scope.region_color_defaults:", "m = MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m,", "must retain the above copyright # notice, this list of conditions and the", "self.get_region_color(rgn_int) print '%s -> %s' % (rgn_name, rgn_int), rgn_color rgn_colors[rgn_name] = rgn_color #", "class MeshGenerationOptions: minimum_diameter = 'MinimumDiameter' class Context(object): def __init__(self, src_zip_file, dst_zip_file): self.color_aliases =", "'src/' + filename] for pf in possible_filenames: try: return self.src_zip_file.open(pf, 'r') except KeyError:", "def __str__(self): return '<ColorDef: (%d,%d,%d)>' % (self.r, self.g, self.b) class RegionColorDef(object): def __init__(self,", "self.src_zip_file.open(pf, 'r') except KeyError: pass raise ValueError(\"Can't find file: %s\" % filename) def", "only first: # morphs = [morphs[0]] for m in morphs: m = m.to_tree()", "Redistribution and use in source and binary forms, with or without # modification,", "__init__(self, rgn, color_def): assert isinstance(rgn, int) assert isinstance(color_def, ColorDef) self.rgn = rgn self.color_def", "key): return self.global_options[key] def get_color(self, alias): return 
self.color_aliases[alias] def add_alias(self, id, color): assert", "source and binary forms, with or without # modification, are permitted provided that", "morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter import MorphologyMinimumDiameter class MeshGenerationOptions:", "this list of conditions and the following disclaimer. # - Redistributions in binary", "b): self.r = r self.g = g self.b = b def __str__(self): return", "return self.src_zip_file.open(pf, 'r') except KeyError: pass raise ValueError(\"Can't find file: %s\" % filename)", "'w') class PlyScope(object): def __init__(self, global_scope): self.global_scope = global_scope self.region_colors = {} self.meshes", "m.get_regions(): rgn_name = rgn.name rgn_int = bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int) print '%s ->", "above copyright # notice, this list of conditions and the following disclaimer in", "'What do I do with region: %d ' % rgn # return ColorDef(200,50,", "self.global_scope.region_color_defaults[None] assert False, 'What do I do with region: %d ' % rgn", "BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "conditions and the following disclaimer in # the documentation and/or other materials provided", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\"", "binary form must reproduce the above copyright # notice, this list of conditions", "Redistributions of source code must retain the above copyright # notice, this list", "has_option_set(self, key): return key in self.global_options def get_option(self, key): return self.global_options[key] def get_color(self,", "form must reproduce the above copyright # notice, this list of conditions and", "---------------------------------------------------------------------- import os from 
morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply", "STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "np.min((rgn*20,255))) def include_file(self, filename, options): src_obj = self.global_scope.getFileObjRead(filename) morphs = NewSWCLoader.load_swc_set(src=src_obj) # Hack:", "self.r = r self.g = g self.b = b def __str__(self): return '<ColorDef:", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN", "% (self.r, self.g, self.b) class RegionColorDef(object): def __init__(self, rgn, color_def): assert isinstance(rgn, int)", "ValueError(\"Can't find file: %s\" % filename) def getFileObjWrite(self, filename): filename = os.path.join(self.op_dir, filename)", "class ColorDef(object): def __init__(self, r, g, b): self.r = r self.g = g", "{ #MeshGenerationOptions.minimum_diameter: 1.0 } self.src_zip_file = src_zip_file self.dst_zip_file = dst_zip_file self.op_files = []", "= src_zip_file self.dst_zip_file = dst_zip_file self.op_files = [] self.op_dir = '/tmp/mf/meshbuilder/' LocMgr.ensure_dir_exists(self.op_dir) def", "%s' % (rgn_name, rgn_int), rgn_color rgn_colors[rgn_name] = rgn_color # Check for ignored Region:", "%s\" % filename) def getFileObjWrite(self, filename): filename = os.path.join(self.op_dir, filename) d = os.path.dirname(filename)", "in rgn_colors.iteritems(): print k, v assert False, 'Partly ignored structure!' continue # Apply", "the above copyright # notice, this list of conditions and the following disclaimer", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ---------------------------------------------------------------------- import", "AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset' in options: m = MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m", "TriangleMesh from morphforge.core.mgrs.locmgr import LocMgr from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS", "= g self.b = b def __str__(self): return '<ColorDef: (%d,%d,%d)>' % (self.r, self.g,", "'Partly ignored structure!' continue # Apply the options: if 'trim' in options: m", "from morphforge.core.mgrs.locmgr import LocMgr from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator from", "src_obj = self.global_scope.getFileObjRead(filename) morphs = NewSWCLoader.load_swc_set(src=src_obj) # Hack: only first: # morphs =", "documentation and/or other materials provided with the # distribution. 
# # THIS SOFTWARE", "m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self, region,", "r self.g = g self.b = b def __str__(self): return '<ColorDef: (%d,%d,%d)>' %", "bi_dict = m.region_number_to_name_bidict rgn_colors = {} for rgn in m.get_regions(): rgn_name = rgn.name", "global_scope self.region_colors = {} self.meshes = [] def get_region_color(self, rgn): assert isinstance(rgn, int)", "% filename) def getFileObjWrite(self, filename): filename = os.path.join(self.op_dir, filename) d = os.path.dirname(filename) if", "self.global_options = { #MeshGenerationOptions.minimum_diameter: 1.0 } self.src_zip_file = src_zip_file self.dst_zip_file = dst_zip_file self.op_files", "PlyScope(global_scope=self) def close_ply_block(self, plyfilename): self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope = None for f in self.op_files: self.dst_zip_file.write(f)", "raise ValueError(\"Can't find file: %s\" % filename) def getFileObjWrite(self, filename): filename = os.path.join(self.op_dir,", "rgn_int = bi_dict.region_name_to_int(rgn_name) rgn_color = self.get_region_color(rgn_int) print '%s -> %s' % (rgn_name, rgn_int),", "Check for ignored Region: if None in rgn_colors.values(): for v in rgn_colors.values(): if", "for ignored Region: if None in rgn_colors.values(): for v in rgn_colors.values(): if v", "None in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[None] assert False, 'What do I do with region:", "morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from morphforge.morphology.mesh.mesh", "MorphologyTranslator from morphforgecontrib.morphology.util.minimum_diameter import 
MorphologyMinimumDiameter class MeshGenerationOptions: minimum_diameter = 'MinimumDiameter' class Context(object): def", "in self.op_files: self.dst_zip_file.write(f) self.op_files = [] def getFileObjRead(self, filename): possible_filenames = [filename, 'src/'", "# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL,", "get_region_color(self, rgn): assert isinstance(rgn, int) # Local colors? if rgn in self.region_colors: return", "plyfilename): self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope = None for f in self.op_files: self.dst_zip_file.write(f) self.op_files = []", "morphs = NewSWCLoader.load_swc_set(src=src_obj) # Hack: only first: # morphs = [morphs[0]] for m", "int) # Local colors? if rgn in self.region_colors: return self.region_colors[rgn] if None in", "ignored structure!' continue # Apply the options: if 'trim' in options: m =", "forms, with or without # modification, are permitted provided that the following conditions", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------- import os from morphforge.morphology.importer.import_array_swc", "# - Redistributions of source code must retain the above copyright # notice,", "source code must retain the above copyright # notice, this list of conditions", "Global colors?: if rgn in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None in self.global_scope.region_color_defaults: return", "FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
class Context(object):
    """Global state for one mesh-building session.

    Reads source files from *src_zip_file*, accumulates generated files in
    a scratch directory, and copies them into *dst_zip_file* whenever a
    PLY block is closed.
    """

    def __init__(self, src_zip_file, dst_zip_file):
        self.color_aliases = {}
        self.region_color_defaults = {}
        self.currentplyscope = None
        self.global_options = {
            #MeshGenerationOptions.minimum_diameter: 1.0
        }
        self.src_zip_file = src_zip_file
        self.dst_zip_file = dst_zip_file
        # Files produced for the current PLY block; flushed into
        # dst_zip_file by close_ply_block().
        self.op_files = []
        self.op_dir = '/tmp/mf/meshbuilder/'
        LocMgr.ensure_dir_exists(self.op_dir)

    def has_option_set(self, key):
        """Return True if the global option *key* has been set."""
        return key in self.global_options

    def get_option(self, key):
        """Return the value of global option *key*; KeyError if unset."""
        return self.global_options[key]

    def get_color(self, alias):
        """Return the color registered under *alias*; KeyError if unknown."""
        return self.color_aliases[alias]

    def add_alias(self, id, color):
        """Register *color* under *id*; redefining an alias is an error."""
        # Fixed: 'id not in' is the idiomatic form (was 'not id in').
        assert id not in self.color_aliases
        self.color_aliases[id] = color

    def set_default_region_color(self, rgn_id, color):
        """Set the fallback color for region *rgn_id*.

        Used when a PlyScope has no local color for the region; *rgn_id*
        may be None to act as a wildcard default.
        """
        self.region_color_defaults[rgn_id] = color

    def new_ply_block(self):
        """Open a new PLY scope; nesting PLY blocks is not allowed."""
        assert self.currentplyscope is None
        self.currentplyscope = PlyScope(global_scope=self)

    def close_ply_block(self, plyfilename):
        """Finalise the current PLY scope and flush its output files into
        the destination zip."""
        self.currentplyscope.finalise(plyfilename=plyfilename)
        self.currentplyscope = None
        for f in self.op_files:
            self.dst_zip_file.write(f)
        self.op_files = []

    def getFileObjRead(self, filename):
        """Open *filename* from the source zip, also trying a 'src/'
        prefix; raise ValueError if neither entry exists."""
        possible_filenames = [filename, 'src/' + filename]
        for pf in possible_filenames:
            try:
                return self.src_zip_file.open(pf, 'r')
            except KeyError:
                # ZipFile.open raises KeyError for a missing member.
                pass
        raise ValueError("Can't find file: %s" % filename)

    def getFileObjWrite(self, filename):
        """Open *filename* for writing inside the scratch directory.

        Intermediate directories are created on demand, and the path is
        remembered so close_ply_block() copies it into the destination
        zip.
        """
        filename = os.path.join(self.op_dir, filename)
        d = os.path.dirname(filename)
        if not os.path.exists(d):
            os.makedirs(d)
        self.op_files.append(filename)
        return open(filename, 'w')
class PlyScope(object):
    """Collects the meshes belonging to a single PLY output block.

    Colors are resolved first against the scope's own region colors, then
    against the global defaults held by *global_scope* (a Context).
    """

    def __init__(self, global_scope):
        self.global_scope = global_scope
        self.region_colors = {}
        self.meshes = []

    def get_region_color(self, rgn):
        """Resolve the color for integer region id *rgn*.

        Lookup order: local region color, local wildcard (None key),
        global default for the region, global wildcard.  A region with no
        color anywhere is an error.
        """
        assert isinstance(rgn, int)
        # Local colors?
        if rgn in self.region_colors:
            return self.region_colors[rgn]
        if None in self.region_colors:
            return self.region_colors[None]
        # Global colors?:
        if rgn in self.global_scope.region_color_defaults:
            return self.global_scope.region_color_defaults[rgn]
        if None in self.global_scope.region_color_defaults:
            return self.global_scope.region_color_defaults[None]
        assert False, 'What do I do with region: %d ' % rgn

    def include_file(self, filename, options):
        """Load every morphology from the SWC file *filename*, apply the
        per-include *options* ('trim', 'offset') and the global
        minimum-diameter option, and append the resulting meshes.

        A region whose resolved color is None is treated as ignored: a
        morphology in which every region is ignored is skipped entirely,
        while a mixture of ignored and colored regions is an error.
        """
        src_obj = self.global_scope.getFileObjRead(filename)
        morphs = NewSWCLoader.load_swc_set(src=src_obj)
        for m in morphs:
            m = m.to_tree()
            # Build the region-name -> color lookup table:
            bi_dict = m.region_number_to_name_bidict
            rgn_colors = {}
            for rgn in m.get_regions():
                rgn_name = rgn.name
                rgn_int = bi_dict.region_name_to_int(rgn_name)
                rgn_color = self.get_region_color(rgn_int)
                # Fixed: print as a function (Python 2/3 compatible);
                # was a Python-2-only print statement.
                print('%s -> %s %s' % (rgn_name, rgn_int, rgn_color))
                rgn_colors[rgn_name] = rgn_color
            # Check for ignored Region:
            if None in rgn_colors.values():
                # Fixed: inner loop no longer shadows the outer loop
                # variable; iteritems() replaced by items() for Python 3.
                if any(color is not None for color in rgn_colors.values()):
                    print('Partly ignoring Structure:')
                    for (name, color) in rgn_colors.items():
                        print('%s %s' % (name, color))
                    assert False, 'Partly ignored structure!'
                # Every region ignored: skip this morphology.
                continue
            # Apply the options:
            if 'trim' in options:
                m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim'])
            if 'offset' in options:
                m = MorphologyTranslator.translate(morphology=m, offset=options['offset'])
            if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter):
                m = MorphologyMinimumDiameter.ensure(
                    m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter))
            mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors)
            self.meshes.append(mesh)

    def set_region_color(self, region, color):
        """Set (or override) the local color for *region*."""
        self.region_colors[region] = color

    def finalise(self, plyfilename):
        """Merge all collected meshes and write them out as one PLY file."""
        m = TriangleMesh.merge(meshes=self.meshes)
        ply = MeshWriterPLY.build_string(m)
        with self.global_scope.getFileObjWrite(plyfilename) as f:
            f.write(ply)
class ColorDef(object):
    """A simple (r, g, b) color definition."""

    def __init__(self, r, g, b):
        # Keep the three channels as plain attributes.
        self.r, self.g, self.b = r, g, b

    def __str__(self):
        channels = (self.r, self.g, self.b)
        return '<ColorDef: (%d,%d,%d)>' % channels
OF THE POSSIBILITY OF SUCH DAMAGE. #", "rgn in self.region_colors: return self.region_colors[rgn] if None in self.region_colors: return self.region_colors[None] # Global", "self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter)) mesh = MeshFromGTS.build(m, plot=False, region_color_map=rgn_colors) self.meshes.append(mesh) def set_region_color(self,", "(%d,%d,%d)>' % (self.r, self.g, self.b) class RegionColorDef(object): def __init__(self, rgn, color_def): assert isinstance(rgn,", "[] def get_region_color(self, rgn): assert isinstance(rgn, int) # Local colors? if rgn in", "'r') except KeyError: pass raise ValueError(\"Can't find file: %s\" % filename) def getFileObjWrite(self,", "reserved. # # Redistribution and use in source and binary forms, with or", "return key in self.global_options def get_option(self, key): return self.global_options[key] def get_color(self, alias): return", "# Redistribution and use in source and binary forms, with or without #", "if not os.path.exists(d): os.makedirs(d) self.op_files.append(filename) return open(filename, 'w') class PlyScope(object): def __init__(self, global_scope):", "morphs: m = m.to_tree() # Create the color LUT: bi_dict = m.region_number_to_name_bidict rgn_colors", "following conditions # are met: # # - Redistributions of source code must", "do with region: %d ' % rgn # return ColorDef(200,50, np.min((rgn*20,255))) def include_file(self,", "SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", "DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "Apply the options: if 'trim' in options: m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset'", "Copyright (c) 2012 <NAME>. # All rights reserved. 
# # Redistribution and use", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF", "(rgn_name, rgn_int), rgn_color rgn_colors[rgn_name] = rgn_color # Check for ignored Region: if None", "ColorDef(object): def __init__(self, r, g, b): self.r = r self.g = g self.b", "= [] def get_region_color(self, rgn): assert isinstance(rgn, int) # Local colors? if rgn", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS;", "in possible_filenames: try: return self.src_zip_file.open(pf, 'r') except KeyError: pass raise ValueError(\"Can't find file:", "for v in rgn_colors.values(): if v is not None: print 'Partly ignoring Structure:',", "not None: print 'Partly ignoring Structure:', for (k, v) in rgn_colors.iteritems(): print k,", "that the following conditions # are met: # # - Redistributions of source", "OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "without # modification, are permitted provided that the following conditions # are met:", "= {} self.meshes = [] def get_region_color(self, rgn): assert isinstance(rgn, int) # Local", "v assert False, 'Partly ignored structure!' continue # Apply the options: if 'trim'", "%d ' % rgn # return ColorDef(200,50, np.min((rgn*20,255))) def include_file(self, filename, options): src_obj", "assert self.currentplyscope is None self.currentplyscope = PlyScope(global_scope=self) def close_ply_block(self, plyfilename): self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope =", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "in rgn_colors.values(): for v in rgn_colors.values(): if v is not None: print 'Partly", "binary forms, with or without # modification, are permitted provided that the following", "of source code must retain the above copyright # notice, this list of", "distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "are permitted provided that the following conditions # are met: # # -", "m.region_number_to_name_bidict rgn_colors = {} for rgn in m.get_regions(): rgn_name = rgn.name rgn_int =", "possible_filenames = [filename, 'src/' + filename] for pf in possible_filenames: try: return self.src_zip_file.open(pf,", "global_scope): self.global_scope = global_scope self.region_colors = {} self.meshes = [] def get_region_color(self, rgn):", "rgn_colors.values(): for v in rgn_colors.values(): if v is not None: print 'Partly ignoring", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #", "os.path.exists(d): os.makedirs(d) self.op_files.append(filename) return open(filename, 'w') class PlyScope(object): def __init__(self, global_scope): self.global_scope =", "self.currentplyscope = None for f in self.op_files: self.dst_zip_file.write(f) self.op_files = [] def getFileObjRead(self,", "in self.color_aliases self.color_aliases[id] = color def set_default_region_color(self, rgn_id, color): self.region_color_defaults[rgn_id] = color def", "the above copyright # notice, this list of conditions and the following disclaimer.", "import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from morphforge.morphology.mesh.mesh import", "close_ply_block(self, plyfilename): self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope = None for f in self.op_files: self.dst_zip_file.write(f) self.op_files =", "class PlyScope(object): def __init__(self, global_scope): self.global_scope = global_scope self.region_colors = {} self.meshes =", "'%s -> %s' % (rgn_name, rgn_int), rgn_color rgn_colors[rgn_name] = rgn_color # Check for", "INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED", "are met: # # - Redistributions of source code must retain the 
above", "self.op_files = [] self.op_dir = '/tmp/mf/meshbuilder/' LocMgr.ensure_dir_exists(self.op_dir) def has_option_set(self, key): return key in", "GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from morphforge.morphology.mesh.mesh import TriangleMesh from morphforge.core.mgrs.locmgr import LocMgr", "OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "= [] self.op_dir = '/tmp/mf/meshbuilder/' LocMgr.ensure_dir_exists(self.op_dir) def has_option_set(self, key): return key in self.global_options", "INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "= color def set_default_region_color(self, rgn_id, color): self.region_color_defaults[rgn_id] = color def new_ply_block(self): assert self.currentplyscope", "getFileObjWrite(self, filename): filename = os.path.join(self.op_dir, filename) d = os.path.dirname(filename) if not os.path.exists(d): os.makedirs(d)", "self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[rgn] if None in self.global_scope.region_color_defaults: return self.global_scope.region_color_defaults[None] assert False, 'What do", "ply = MeshWriterPLY.build_string(m) with self.global_scope.getFileObjWrite(plyfilename) as f: f.write(ply) class ColorDef(object): def __init__(self, r,", "f.write(ply) class ColorDef(object): def __init__(self, r, g, b): self.r = r self.g =", "'offset' in options: m = MorphologyTranslator.translate(morphology=m, offset=options['offset']) if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter): m = MorphologyMinimumDiameter.ensure(m, min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter))", "None self.currentplyscope = PlyScope(global_scope=self) def close_ply_block(self, plyfilename): 
self.currentplyscope.finalise(plyfilename=plyfilename) self.currentplyscope = None for f", "self.region_colors = {} self.meshes = [] def get_region_color(self, rgn): assert isinstance(rgn, int) #", "copyright # notice, this list of conditions and the following disclaimer. # -", "# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "for pf in possible_filenames: try: return self.src_zip_file.open(pf, 'r') except KeyError: pass raise ValueError(\"Can't", "from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from morphforge.morphology.mesh.mesh import TriangleMesh from morphforge.core.mgrs.locmgr import LocMgr from", "'Partly ignoring Structure:', for (k, v) in rgn_colors.iteritems(): print k, v assert False,", "as f: f.write(ply) class ColorDef(object): def __init__(self, r, g, b): self.r = r", "HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY,", "filename) def getFileObjWrite(self, filename): filename = os.path.join(self.op_dir, filename) d = os.path.dirname(filename) if not", "# distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO,", "m = AxonTrimmer.trim_axon_from_morphology(m, max_dist_to_parent=options['trim']) if 'offset' in options: m = MorphologyTranslator.translate(morphology=m, offset=options['offset']) if", "rgn_colors[rgn_name] = rgn_color # Check for ignored Region: if None in rgn_colors.values(): for", "# Check for ignored Region: if None in rgn_colors.values(): for v in rgn_colors.values():", "from morphforge.morphology.importer.import_array_swc import NewSWCLoader from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS from morphforge.morphology.mesh.writer_ply import MeshWriterPLY from", "list of conditions and the following disclaimer in # the documentation and/or other" ]
[ "data = Data(cfg) test_loader = data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task = Task(cfg,", "MoveNet, Task from config import cfg from lib.utils.utils import arg_parser # Script to", "= Data(cfg) test_loader = data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task = Task(cfg, model)", "the model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg)", "model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader = data.getTestDataloader() # _,test_loader", "run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) #", "various outputs of the model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train')", "run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__", "the various outputs of the model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"],", "import arg_parser # Script to create and save as images all the various", "mode='train') data = Data(cfg) test_loader = data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task =", "run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # 
for coco #", "https://github.com/fire717 \"\"\" from lib import init, Data, MoveNet, Task from config import cfg", "run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\")", "arg_parser # Script to create and save as images all the various outputs", "init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader = data.getTestDataloader() #", "for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__ == '__main__':", "# for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__ ==", "lib.utils.utils import arg_parser # Script to create and save as images all the", "main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader = data.getTestDataloader()", "# run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader,", "= Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"])", "# _,test_loader = data.getTrainValDataloader() run_task = Task(cfg, model) 
run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\")", "images all the various outputs of the model def main(cfg): init(cfg) model =", "Script to create and save as images all the various outputs of the", "save as images all the various outputs of the model def main(cfg): init(cfg)", "\"\"\" @Fire https://github.com/fire717 \"\"\" from lib import init, Data, MoveNet, Task from config", "from config import cfg from lib.utils.utils import arg_parser # Script to create and", "# run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if", "Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader,", "MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader = data.getTestDataloader() # _,test_loader = data.getTrainValDataloader()", "_,test_loader = data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") #", "= data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for", "import init, Data, 
MoveNet, Task from config import cfg from lib.utils.utils import arg_parser", "cfg from lib.utils.utils import arg_parser # Script to create and save as images", "of the model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data =", "= data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\")", "run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__ == '__main__': cfg = arg_parser(cfg)", "# Script to create and save as images all the various outputs of", "run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__ == '__main__': cfg = arg_parser(cfg) main(cfg)", "\"\"\" from lib import init, Data, MoveNet, Task from config import cfg from", "from lib.utils.utils import arg_parser # Script to create and save as images all", "model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader", "Task from config import cfg from lib.utils.utils import arg_parser # Script to create", "init, Data, MoveNet, Task from config import cfg from lib.utils.utils import arg_parser #", "data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") #", "all the various outputs of the model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"],", "model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # 
run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"])", "width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader = data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task", "coco # run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__ == '__main__': cfg", "Data, MoveNet, Task from config import cfg from lib.utils.utils import arg_parser # Script", "config import cfg from lib.utils.utils import arg_parser # Script to create and save", "# run_task.modelLoad(cfg[\"newest_ckpt\"]) run_task.predict(test_loader, cfg[\"predict_output_path\"]) # run_task.predict(test_loader, \"output/predict\") if __name__ == '__main__': cfg =", "@Fire https://github.com/fire717 \"\"\" from lib import init, Data, MoveNet, Task from config import", "outputs of the model def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data", "lib import init, Data, MoveNet, Task from config import cfg from lib.utils.utils import", "as images all the various outputs of the model def main(cfg): init(cfg) model", "= MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader = data.getTestDataloader() # _,test_loader =", "test_loader = data.getTestDataloader() # _,test_loader = data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") #", "import cfg from lib.utils.utils import arg_parser # Script to create and save as", "and save as images all the various outputs of the model def main(cfg):", "Data(cfg) test_loader = data.getTestDataloader() # _,test_loader = 
data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\")", "create and save as images all the various outputs of the model def", "data.getTrainValDataloader() run_task = Task(cfg, model) run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth\") # run_task.modelLoad(\"/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth\") # run_task.modelLoad(\"output/mbv2_e105_valacc0.80255.pth\") # for coco", "from lib import init, Data, MoveNet, Task from config import cfg from lib.utils.utils", "def main(cfg): init(cfg) model = MoveNet(num_classes=cfg[\"num_classes\"], width_mult=cfg[\"width_mult\"], mode='train') data = Data(cfg) test_loader =", "to create and save as images all the various outputs of the model" ]
[ "sequence = binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200", "bitcoinlib utility scripts x = lambda h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1]", "intended if txid != otxid: print(\"Did not achive target TXID hash\") print(\"Perhaps R-value", "from electrum.constants import set_testnet from electrum.ecc import ECPrivkey # The basic bitcoinlib utility", "s]) # Electrum assumes P2SH is multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction):", "= 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999 script_type =", "bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if 'hex'", "= lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler compile = lambda", "= sats - 200 locktime = 1602565200 # Build the Transaction Input _,", "and compute txid txn = tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we arrived", "'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set testnet set_testnet() # I removed the", "sha256d from electrum.constants import set_testnet from electrum.ecc import ECPrivkey # The basic bitcoinlib", "b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler compile = lambda s: \"\".join([ opcodes[i].hex()", "we are trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build", "the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the transaction", "PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto import hash_160, sha256d from", "# Electrum assumes P2SH is multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def", "= PartialTxInput(prevout=prevout) txin.nsequence = sequence 
txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script =", "i in dir(opcodes) else push_script(i) for i in s]) # Electrum assumes P2SH", "the R-value grinding to use \"legacy\" sig processing # This is the original", "\"legacy\" sig processing # This is the original TXID we are trying to", "compute txid txn = tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we arrived at", "Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey) print(\"priv:\", b2x(privkey)) print(\"txid:\", txid) print(\"txn:\", txn)", "target TXID hash\") print(\"Perhaps R-value hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") #", "= lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx = lambda", "[note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput,", "# I removed the R-value grinding to use \"legacy\" sig processing # This", "[license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj #", "# Basic constants to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434')", "not achive target TXID hash\") print(\"Perhaps R-value hashing needs to be reverted\") Print(\"See:", "txid = b2lx(sha256d(x(txn))) # Ensure we arrived at where we intended if txid", "address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime = 1602565200 # Build", "TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type = script_type expiry =", "= TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type = script_type expiry", "2**32 sequence = 
binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats -", "open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from", "sats_less_fees) # Build and sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version", "tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig = tx.sign_txin(0, privkey) txin.script_sig", "electrum.crypto import hash_160, sha256d from electrum.constants import set_testnet from electrum.ecc import ECPrivkey #", "removed the R-value grinding to use \"legacy\" sig processing # This is the", "= 1 sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script])) # Get", "b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','')", "b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) #", "and sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig", "testnet set_testnet() # I removed the R-value grinding to use \"legacy\" sig processing", "Very simple bitcoin script comiler compile = lambda s: \"\".join([ opcodes[i].hex() if i", "txin.nsequence = sequence txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry,", "hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the TXNIN wif =", "9999 script_type = 'p2sh' binzero = 2**32 sequence = binzero - 3 address", "txn and compute txid txn = tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we", "get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set testnet set_testnet() # I", "are trying to hit otxid = 
'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the", "def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) #", "Input _, privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout)", "Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the transaction tx", "exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes,", "P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script)", "bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python", "-m pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from", "arrived at where we intended if txid != otxid: print(\"Did not achive target", "print(\"Did not achive target TXID hash\") print(\"Perhaps R-value hashing needs to be reverted\")", "import hash_160, sha256d from electrum.constants import set_testnet from electrum.ecc import ECPrivkey # The", "hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey) print(\"priv:\",", "i in s]) # Electrum assumes P2SH is multisig, this subclass corrects that", "to use \"legacy\" sig processing # This is the original TXID we are", "= b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', 
pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script)", "# [req] python -m pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as", "txid != otxid: print(\"Did not achive target TXID hash\") print(\"Perhaps R-value hashing needs", "multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls,", "be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey) print(\"priv:\", b2x(privkey)) print(\"txid:\", txid)", "'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid =", "binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime =", "b2x(txin.redeem_script) # Set testnet set_testnet() # I removed the R-value grinding to use", "P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig", "\"\".join([ opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in s]) #", "ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type =", "else push_script(i) for i in s]) # Electrum assumes P2SH is multisig, this", "= ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type", "redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build", "This is the original TXID we are trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93'", "-> str: return b2x(txin.redeem_script) # 
Set testnet set_testnet() # I removed the R-value", "# [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m", "# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/", "h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if", "at github https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py #", "# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj", "txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set testnet set_testnet() # I removed", "sats_less_fees = sats - 200 locktime = 1602565200 # Build the Transaction Input", "I removed the R-value grinding to use \"legacy\" sig processing # This is", "txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999 script_type = 'p2sh' binzero", "this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin:", "otxid: print(\"Did not achive target TXID hash\") print(\"Perhaps R-value hashing needs to be", "script_type = 'p2sh' binzero = 2**32 sequence = binzero - 3 address =", "expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script =", "a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # 
[ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install electrum # [note]", "x(compile([sig , redeem_script])) # Get the serialized txn and compute txid txn =", "R-value grinding to use \"legacy\" sig processing # This is the original TXID", "(b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') #", "the serialized txn and compute txid txn = tx.serialize() txid = b2lx(sha256d(x(txn))) #", "electrum.ecc import ECPrivkey # The basic bitcoinlib utility scripts x = lambda h:", "x(redeem_script) # Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and", "for i in s]) # Electrum assumes P2SH is multisig, this subclass corrects", "- 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime = 1602565200", "str: return b2x(txin.redeem_script) # Set testnet set_testnet() # I removed the R-value grinding", "# Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign", "pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence", "x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999 script_type = 'p2sh' binzero = 2**32", "# [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install electrum # [note] with", "txin.script_sig = x(compile([sig , redeem_script])) # Get the serialized txn and compute txid", "PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime)", "Transaction Input _, privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid,", "return 
b2x(txin.redeem_script) # Set testnet set_testnet() # I removed the R-value grinding to", "= 1602565200 # Build the Transaction Input _, privkey, compressed = deserialize_privkey(wif) pubkey", "txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script", "dir(b) else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin script", "hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler compile", "TXID we are trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to", "# [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput,", "else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler", "locktime = 1602565200 # Build the Transaction Input _, privkey, compressed = deserialize_privkey(wif)", "scripts x = lambda h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x =", "needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey) print(\"priv:\", b2x(privkey))", "reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey) print(\"priv:\", b2x(privkey)) print(\"txid:\", txid) print(\"txn:\",", "electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction import TxOutpoint,", "transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig = tx.sign_txin(0, privkey)", "ECPrivkey # The basic bitcoinlib utility scripts x = lambda h: bytes.fromhex(h) lx", "# Ensure we arrived at where we intended if txid != otxid: print(\"Did", ", redeem_script])) # Get the serialized txn and compute txid txn = tx.serialize()", "from 
electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script", "= tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we arrived at where we intended", "pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction", "set_testnet from electrum.ecc import ECPrivkey # The basic bitcoinlib utility scripts x =", "Copyright 2020 brianddk at github https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 #", "Build the Transaction Input _, privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout", "= x(compile([sig , redeem_script])) # Get the serialized txn and compute txid txn", "# [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93", "if i in dir(opcodes) else push_script(i) for i in s]) # Electrum assumes", "otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N'", "3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime = 1602565200 #", "txin.redeem_script = x(redeem_script) # Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) #", "script comiler compile = lambda s: \"\".join([ opcodes[i].hex() if i in dir(opcodes) else", "vout = 1 sats = 9999 script_type = 'p2sh' binzero = 2**32 sequence", "= x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999 script_type = 'p2sh' binzero =", "'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = 
x(redeem_script) # Build the Transaction Output txout =", "sequence txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP',", "original TXID we are trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants", "Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar]", "privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin =", "'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build the Transaction Output txout", "electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script from", "[req] python -m pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s:", "from electrum.crypto import hash_160, sha256d from electrum.constants import set_testnet from electrum.ecc import ECPrivkey", "b2x = lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx =", "sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script])) # Get the serialized", "constants to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout =", "[txout], locktime=locktime) tx.version = 1 sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig ,", "= lambda h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if 'hex' in dir(b)", "[tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] 
https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip", "bitcoin script comiler compile = lambda s: \"\".join([ opcodes[i].hex() if i in dir(opcodes)", "TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999", "'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999 script_type = 'p2sh'", "[repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 #", "bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx", "= script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG'])", "serialized txn and compute txid txn = tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure", "compile = lambda s: \"\".join([ opcodes[i].hex() if i in dir(opcodes) else push_script(i) for", "TXID hash\") print(\"Perhaps R-value hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display", "h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','')", "script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script", "is multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def", "redeem_script])) # Get the serialized txn and compute txid txn = tx.serialize() txid", "where we intended if txid != otxid: print(\"Did not achive target TXID hash\")", "# [txid] 
a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install electrum", "opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in s]) # Electrum", "= 9999 script_type = 'p2sh' binzero = 2**32 sequence = binzero - 3", "brianddk at github https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py", "github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref]", "lambda s: \"\".join([ opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in", "#!/usr/bin/env python3 # [rights] Copyright 2020 brianddk at github https://github.com/brianddk # [license] Apache", "2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt", "= b2lx(sha256d(x(txn))) # Ensure we arrived at where we intended if txid !=", "tx.version = 1 sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script])) #", "[ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\",", "import ECPrivkey # The basic bitcoinlib utility scripts x = lambda h: bytes.fromhex(h)", "sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig =", "basic bitcoinlib utility scripts x = lambda h: bytes.fromhex(h) lx = lambda h:", "out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type = 
script_type expiry = b2x(lx(b2x(locktime)))", "at where we intended if txid != otxid: print(\"Did not achive target TXID", "deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence =", "utility scripts x = lambda h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x", "github https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc]", "= 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime = 1602565200 # Build the", "Get the serialized txn and compute txid txn = tx.serialize() txid = b2lx(sha256d(x(txn)))", "sats = 9999 script_type = 'p2sh' binzero = 2**32 sequence = binzero -", "https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r')", "github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install", "'r') as s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin", "= 1 sats = 9999 script_type = 'p2sh' binzero = 2**32 sequence =", "s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey,", "hash_160, sha256d from electrum.constants import set_testnet from electrum.ecc import ECPrivkey # The basic", "from electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto import hash_160, sha256d from electrum.constants", "'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime = 1602565200 # 
Build the Transaction", "# Set testnet set_testnet() # I removed the R-value grinding to use \"legacy\"", "if txid != otxid: print(\"Did not achive target TXID hash\") print(\"Perhaps R-value hashing", "R-value hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey)", "grinding to use \"legacy\" sig processing # This is the original TXID we", "processing # This is the original TXID we are trying to hit otxid", "Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the", "hash\") print(\"Perhaps R-value hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results", "BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req]", "# Get the serialized txn and compute txid txn = tx.serialize() txid =", "install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction import", "@classmethod def get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set testnet set_testnet()", "import deserialize_privkey, opcodes, push_script from electrum.crypto import hash_160, sha256d from electrum.constants import set_testnet", "= PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout],", "[txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ # [req] python -m pip install electrum #", "s: \"\".join([ opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in s])", "b2lx(sha256d(x(txn))) # Ensure we arrived at where we intended if txid != otxid:", "class 
P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput') -> str: return", "1 sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script])) # Get the", "200 locktime = 1602565200 # Build the Transaction Input _, privkey, compressed =", "push_script(i) for i in s]) # Electrum assumes P2SH is multisig, this subclass", "the Transaction Input _, privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout =", "lambda h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex()", "import set_testnet from electrum.ecc import ECPrivkey # The basic bitcoinlib utility scripts x", "= compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build the", "Electrum assumes P2SH is multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self):", "set_testnet() # I removed the R-value grinding to use \"legacy\" sig processing #", "subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput')", "python3 # [rights] Copyright 2020 brianddk at github https://github.com/brianddk # [license] Apache 2.0", "https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32:", "trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the TXNIN", "electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto import hash_160, sha256d from electrum.constants import", "= lambda h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x = 
lambda b:", "comiler compile = lambda s: \"\".join([ opcodes[i].hex() if i in dir(opcodes) else push_script(i)", "deserialize_privkey, opcodes, push_script from electrum.crypto import hash_160, sha256d from electrum.constants import set_testnet from", "Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the transaction tx =", "# Build the Transaction Input _, privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed)", "wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats = 9999 script_type", "locktime=locktime) tx.version = 1 sig = tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script]))", "print(\"Perhaps R-value hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\",", "achive target TXID hash\") print(\"Perhaps R-value hashing needs to be reverted\") Print(\"See: https://redd.it/jf97pc\")", "to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1", "__init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set", "the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats =", "the original TXID we are trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic", "[rights] Copyright 2020 brianddk at github https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0", "PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto 
import hash_160,", "'hex' in dir(b) else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') # Very simple", "privkey) txin.script_sig = x(compile([sig , redeem_script])) # Get the serialized txn and compute", "in dir(opcodes) else push_script(i) for i in s]) # Electrum assumes P2SH is", "lx = lambda h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if 'hex' in", "we intended if txid != otxid: print(\"Did not achive target TXID hash\") print(\"Perhaps", "[btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93 # [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/ #", "1 sats = 9999 script_type = 'p2sh' binzero = 2**32 sequence = binzero", "assumes P2SH is multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self)", "use \"legacy\" sig processing # This is the original TXID we are trying", "txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build and sign the transaction tx = P2SHPartialTransaction.from_io([txin],", "txid txn = tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we arrived at where", "# The basic bitcoinlib utility scripts x = lambda h: bytes.fromhex(h) lx =", "if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') # Very", "txn = tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we arrived at where we", "dir(opcodes) else push_script(i) for i in s]) # Electrum assumes P2SH is multisig,", "P2SH is multisig, this subclass corrects that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod", "compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build the Transaction", "corrects that class 
P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput') ->", "import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto", "to be reverted\") Print(\"See: https://redd.it/jf97pc\") # Display results print(\"pubk:\", pubkey) print(\"priv:\", b2x(privkey)) print(\"txid:\",", "to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the TXNIN wif", "!= otxid: print(\"Did not achive target TXID hash\") print(\"Perhaps R-value hashing needs to", "b2lx = lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler compile =", "python -m pip install electrum # [note] with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read())", "Ensure we arrived at where we intended if txid != otxid: print(\"Did not", "= deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence", "PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([", "as s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import", "in s]) # Electrum assumes P2SH is multisig, this subclass corrects that class", "opcodes, push_script from electrum.crypto import hash_160, sha256d from electrum.constants import set_testnet from electrum.ecc", "lambda h: bytes.fromhex(h)[::-1] b2x = lambda b: (b.hex() if 'hex' in dir(b) else", "in dir(b) else hex(b)).replace('0x','') b2lx = lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin", "push_script from electrum.crypto import hash_160, sha256d from electrum.constants import 
set_testnet from electrum.ecc import", "def get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set testnet set_testnet() #", "txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey,", "TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto import", "tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script])) # Get the serialized txn and", "Build and sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1", "'p2sh' binzero = 2**32 sequence = binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees", "lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','') b2lx = lambda b:", "compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout)", "b: b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler compile = lambda s: \"\".join([", "sig processing # This is the original TXID we are trying to hit", "'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees)", "Basic constants to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout", "The basic bitcoinlib utility scripts x = lambda h: bytes.fromhex(h) lx = lambda", "electrum.constants import set_testnet from electrum.ecc import ECPrivkey # The basic bitcoinlib utility scripts", "that class P2SHPartialTransaction(PartialTransaction): def __init__(self): PartialTransaction.__init__(self) @classmethod def get_preimage_script(cls, txin: 'PartialTxInput') -> 
str:", "= 2**32 sequence = binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats", "= binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees = sats - 200 locktime", "from electrum.ecc import ECPrivkey # The basic bitcoinlib utility scripts x = lambda", "- 200 locktime = 1602565200 # Build the Transaction Input _, privkey, compressed", "# Very simple bitcoin script comiler compile = lambda s: \"\".join([ opcodes[i].hex() if", "tx.serialize() txid = b2lx(sha256d(x(txn))) # Ensure we arrived at where we intended if", "# [rights] Copyright 2020 brianddk at github https://github.com/brianddk # [license] Apache 2.0 License", "# Build and sign the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version =", "is the original TXID we are trying to hit otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' #", "= 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93' # Basic constants to build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid", "_, privkey, compressed = deserialize_privkey(wif) pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed) prevout = TxOutpoint(txid=txid, out_idx=vout) txin", "Set testnet set_testnet() # I removed the R-value grinding to use \"legacy\" sig", "we arrived at where we intended if txid != otxid: print(\"Did not achive", "prevout = TxOutpoint(txid=txid, out_idx=vout) txin = PartialTxInput(prevout=prevout) txin.nsequence = sequence txin.script_type = script_type", "PartialTxOutput, PartialTransaction from electrum.bitcoin import deserialize_privkey, opcodes, push_script from electrum.crypto import hash_160, sha256d", "build the TXNIN wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N' txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434') vout = 1 sats", "x = lambda h: bytes.fromhex(h) lx = lambda h: bytes.fromhex(h)[::-1] b2x = lambda", 
"= P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig = tx.sign_txin(0, privkey) txin.script_sig =", "with open(r\"..\\reddit\\python\\hodl.py\", 'r') as s: exec(s.read()) from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction", "expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build the Transaction Output", "simple bitcoin script comiler compile = lambda s: \"\".join([ opcodes[i].hex() if i in", "binzero = 2**32 sequence = binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk' sats_less_fees =", "https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt # [txid]", "sats - 200 locktime = 1602565200 # Build the Transaction Input _, privkey,", "lambda b: b[::-1].hex().replace('0x','') # Very simple bitcoin script comiler compile = lambda s:", "= sequence txin.script_type = script_type expiry = b2x(lx(b2x(locktime))) redeem_script = compile([ expiry, 'OP_CHECKLOCKTIMEVERIFY',", "= lambda s: \"\".join([ opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i", "License https://www.apache.org/licenses/LICENSE-2.0 # [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py # [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj # [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt #", "the transaction tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime) tx.version = 1 sig = tx.sign_txin(0,", "2020 brianddk at github https://github.com/brianddk # [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0 # [repo]", "= 'p2sh' binzero = 2**32 sequence = binzero - 3 address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk'", "PartialTransaction.__init__(self) @classmethod def 
get_preimage_script(cls, txin: 'PartialTxInput') -> str: return b2x(txin.redeem_script) # Set testnet", "# This is the original TXID we are trying to hit otxid =", "pubkey, 'OP_CHECKSIG']) txin.redeem_script = x(redeem_script) # Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address,", "= tx.sign_txin(0, privkey) txin.script_sig = x(compile([sig , redeem_script])) # Get the serialized txn", "1602565200 # Build the Transaction Input _, privkey, compressed = deserialize_privkey(wif) pubkey =", "= x(redeem_script) # Build the Transaction Output txout = PartialTxOutput.from_address_and_value(address, sats_less_fees) # Build" ]
[ "img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 = cv2.merge([r,g,b]) plt.subplot(121),plt.imshow(img) #颜色有失真 plt.subplot(122),plt.imshow(img2) #颜色正常", "as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 =", ": wudan import cv2 import numpy as pd from matplotlib import pyplot as", "@Date : 2019-03-14 # @Author : wudan import cv2 import numpy as pd", "CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 = cv2.merge([r,g,b]) plt.subplot(121),plt.imshow(img) #颜色有失真", "pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2", "utf-8 -*- # @File : image_rw_matplot.py # @Date : 2019-03-14 # @Author :", "image_rw_matplot.py # @Date : 2019-03-14 # @Author : wudan import cv2 import numpy", "-*- # @File : image_rw_matplot.py # @Date : 2019-03-14 # @Author : wudan", "= cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 = cv2.merge([r,g,b]) plt.subplot(121),plt.imshow(img) #颜色有失真 plt.subplot(122),plt.imshow(img2) #颜色正常 plt.show()", "# @Date : 2019-03-14 # @Author : wudan import cv2 import numpy as", "-*- coding: utf-8 -*- # @File : image_rw_matplot.py # @Date : 2019-03-14 #", "\"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 = cv2.merge([r,g,b]) plt.subplot(121),plt.imshow(img) #颜色有失真 plt.subplot(122),plt.imshow(img2)", "<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- # @File : image_rw_matplot.py #", "matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r =", "cv2 import numpy as pd from matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式", "import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img)", "wudan import cv2 import numpy as pd from matplotlib 
import pyplot as plt", "numpy as pd from matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img", "#!/usr/bin/env python # -*- coding: utf-8 -*- # @File : image_rw_matplot.py # @Date", "@Author : wudan import cv2 import numpy as pd from matplotlib import pyplot", "pd from matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1)", "@File : image_rw_matplot.py # @Date : 2019-03-14 # @Author : wudan import cv2", ": 2019-03-14 # @Author : wudan import cv2 import numpy as pd from", "python # -*- coding: utf-8 -*- # @File : image_rw_matplot.py # @Date :", "# @File : image_rw_matplot.py # @Date : 2019-03-14 # @Author : wudan import", "\"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 = cv2.merge([r,g,b]) plt.subplot(121),plt.imshow(img)", "coding: utf-8 -*- # @File : image_rw_matplot.py # @Date : 2019-03-14 # @Author", "import numpy as pd from matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\"", "from matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r", "plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img = cv2.imread('zhuzhu_1.jpeg',1) b,g,r = cv2.split(img) img2 = cv2.merge([r,g,b])", "2019-03-14 # @Author : wudan import cv2 import numpy as pd from matplotlib", ": image_rw_matplot.py # @Date : 2019-03-14 # @Author : wudan import cv2 import", "# -*- coding: utf-8 -*- # @File : image_rw_matplot.py # @Date : 2019-03-14", "import cv2 import numpy as pd from matplotlib import pyplot as plt \"\"\"", "# @Author : wudan import cv2 import numpy as pd from matplotlib import", "as pd from matplotlib import pyplot as plt \"\"\" CV2处理图像使用的是BGR模式,而matplotlib使用的是RGB模式 \"\"\" img =" ]
[ "times: List[List[int]], N: int, K: int) -> int: import heapq from collections import", "heapq from collections import defaultdict createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap)", "currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1]))", "while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in visited: for edge in", "edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if len(visited)!=N: return -1 return time ​", "edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in", "K: int) -> int: import heapq from collections import defaultdict createGraph=defaultdict(list) for i", "def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int: import heapq from", "collections import defaultdict createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set()", "Solution: def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int: import heapq", "print(currEdge) if currEdge[1] not in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0])", "createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge", "times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while", "for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) 
for edge in", "in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in visited:", "not in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if len(visited)!=N: return", "createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in visited: for", "for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if len(visited)!=N: return -1 return time", "createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while heap:", "in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if len(visited)!=N: return -1", "class Solution: def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int: import", "import defaultdict createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K)", "# print(currEdge) if currEdge[1] not in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1])", "in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge)", "i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]:", "defaultdict createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for", 
"visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if len(visited)!=N: return -1 return", "visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1]", "time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge)", "heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in visited: for edge in createGraph[currEdge[1]]:", "List[List[int]], N: int, K: int) -> int: import heapq from collections import defaultdict", "for edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not", "int, K: int) -> int: import heapq from collections import defaultdict createGraph=defaultdict(list) for", "-> int: import heapq from collections import defaultdict createGraph=defaultdict(list) for i in times:", "N: int, K: int) -> int: import heapq from collections import defaultdict createGraph=defaultdict(list)", "networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int: import heapq from collections", "heap=[] heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap)", "visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if", "if currEdge[1] not in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if", "int: import heapq from collections import defaultdict createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1]))", "from collections import defaultdict createGraph=defaultdict(list) for i in times: 
createGraph[i[0]].append((i[2],i[1])) heap=[] heapq.heapify(heap) time=float('-inf')", "heapq.heapify(heap) time=float('-inf') visited=set() visited.add(K) for edge in createGraph[K]: heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) #", "heapq.heappush(heap,edge) while heap: currEdge=heapq.heappop(heap) # print(currEdge) if currEdge[1] not in visited: for edge", "currEdge[1] not in visited: for edge in createGraph[currEdge[1]]: heapq.heappush(heap,(edge[0]+currEdge[0],edge[1])) visited.add(currEdge[1]) time=max(time,currEdge[0]) if len(visited)!=N:", "int) -> int: import heapq from collections import defaultdict createGraph=defaultdict(list) for i in", "import heapq from collections import defaultdict createGraph=defaultdict(list) for i in times: createGraph[i[0]].append((i[2],i[1])) heap=[]" ]
[ "<reponame>m4ta1l/deal # app from .asserts import get_asserts from .common import get_name from .contracts", ".asserts import get_asserts from .common import get_name from .contracts import get_contracts from .exceptions", "get_contracts from .exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals import get_globals", "from .prints import get_prints from .returns import get_returns, has_returns from .value import get_value", "get_imports from .pre import get_pre from .prints import get_prints from .returns import get_returns,", ".globals import get_globals from .imports import get_imports from .pre import get_pre from .prints", "import get_returns, has_returns from .value import get_value __all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs',", "import get_name from .contracts import get_contracts from .exceptions import get_exceptions from .exceptions_stubs import", "[ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name', 'get_pre', 'get_prints', 'get_returns', 'get_value', 'has_returns',", "import get_globals from .imports import get_imports from .pre import get_pre from .prints import", "# app from .asserts import get_asserts from .common import get_name from .contracts import", "get_prints from .returns import get_returns, has_returns from .value import get_value __all__ = [", "import get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals import get_globals from .imports import", "import get_exceptions_stubs from .globals import get_globals from .imports import get_imports from .pre import", "from .contracts import get_contracts from .exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs from", ".contracts import get_contracts from .exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals", "get_value __all__ = [ 'get_asserts', 'get_contracts', 
'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name', 'get_pre', 'get_prints',", ".common import get_name from .contracts import get_contracts from .exceptions import get_exceptions from .exceptions_stubs", "import get_value __all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name', 'get_pre',", "get_name from .contracts import get_contracts from .exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs", "from .asserts import get_asserts from .common import get_name from .contracts import get_contracts from", "get_pre from .prints import get_prints from .returns import get_returns, has_returns from .value import", ".pre import get_pre from .prints import get_prints from .returns import get_returns, has_returns from", "app from .asserts import get_asserts from .common import get_name from .contracts import get_contracts", "from .exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals import get_globals from", ".exceptions_stubs import get_exceptions_stubs from .globals import get_globals from .imports import get_imports from .pre", ".imports import get_imports from .pre import get_pre from .prints import get_prints from .returns", "from .exceptions_stubs import get_exceptions_stubs from .globals import get_globals from .imports import get_imports from", ".prints import get_prints from .returns import get_returns, has_returns from .value import get_value __all__", ".value import get_value __all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name',", "from .common import get_name from .contracts import get_contracts from .exceptions import get_exceptions from", "= [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name', 'get_pre', 'get_prints', 'get_returns', 'get_value',", 
"import get_prints from .returns import get_returns, has_returns from .value import get_value __all__ =", "__all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name', 'get_pre', 'get_prints', 'get_returns',", "'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports', 'get_name', 'get_pre', 'get_prints', 'get_returns', 'get_value', 'has_returns', ]", "import get_pre from .prints import get_prints from .returns import get_returns, has_returns from .value", "from .value import get_value __all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals', 'get_imports',", "from .globals import get_globals from .imports import get_imports from .pre import get_pre from", "get_asserts from .common import get_name from .contracts import get_contracts from .exceptions import get_exceptions", "from .pre import get_pre from .prints import get_prints from .returns import get_returns, has_returns", "import get_imports from .pre import get_pre from .prints import get_prints from .returns import", "has_returns from .value import get_value __all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions', 'get_globals',", "import get_contracts from .exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals import", "get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals import get_globals from .imports import get_imports", "from .returns import get_returns, has_returns from .value import get_value __all__ = [ 'get_asserts',", "get_globals from .imports import get_imports from .pre import get_pre from .prints import get_prints", "get_returns, has_returns from .value import get_value __all__ = [ 'get_asserts', 'get_contracts', 'get_exceptions_stubs', 'get_exceptions',", ".returns import get_returns, has_returns from .value import get_value __all__ = [ 
'get_asserts', 'get_contracts',", "get_exceptions_stubs from .globals import get_globals from .imports import get_imports from .pre import get_pre", "import get_asserts from .common import get_name from .contracts import get_contracts from .exceptions import", ".exceptions import get_exceptions from .exceptions_stubs import get_exceptions_stubs from .globals import get_globals from .imports", "from .imports import get_imports from .pre import get_pre from .prints import get_prints from" ]
[ "(c) <NAME>, <NAME>. All rights reserved. ## See LICENSE file for details. ##", "disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface", "surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside the", "1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None: self.PrintError('Error: no Surface.')", "== 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj): if not self.Actor:", "< arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget", "$ ## Version: $Revision: 1.9 $ ## Copyright (c) <NAME>, <NAME>. 
All rights", "range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue <", "the surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a", "self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None: self.PrintError('Error:", "obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable the widget, but immediately after", "None self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0", "point or cell data array'], ['InsideValue','inside','float',1,'','value with which the surface within the contour", "rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i", "pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None self.OwnRenderer", "self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on a", "a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or", "self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData()", "in 
range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue", "= triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array", "copyright notices for more information. from __future__ import absolute_import #NEEDS TO STAY AS", "self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i)", "contourValue < 0.0 and selectionValue < 0.0) or (contourValue < 0.0 and selectionValue", "self.Representation == 'surface': representation = 'edges' elif self.Representation == 'edges': representation = 'wireframe'", "the input surface'], ['ArrayName','array','str',1,'','the name of the self.Array where the generated scalars are", "within the contour is filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside the contour", "surface'], ['ArrayName','array','str',1,'','the name of the self.Array where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle", "self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn()", "'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def", 
"closed contour on a surface and generate a new tag inside it') self.SetInputMembers([", "< 0.0 and selectionValue < 0.0) or (contourValue < 0.0 and selectionValue <", "Python ## Date: $Date: 2006/05/26 12:35:13 $ ## Version: $Revision: 1.9 $ ##", "= None self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation = 'edges' self.Actor =", "self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e in self.Array.GetValueRange(0)]", "Language: Python ## Date: $Date: 2006/05/26 12:35:13 $ ## Version: $Revision: 1.9 $", "All rights reserved. ## See LICENSE file for details. ## This software is", "2006/05/26 12:35:13 $ ## Version: $Revision: 1.9 $ ## Copyright (c) <NAME>, <NAME>.", "$RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date: $Date: 2006/05/26 12:35:13 $ ##", "class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None self.OwnRenderer =", "self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj):", "obj): if not self.Actor: return if self.Representation == 'surface': representation = 'edges' elif", "<NAME>. All rights reserved. ## See LICENSE file for details. 
## This software", "region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ])", "also when a tag array already exists in the input surface'], ['ArrayName','array','str',1,'','the name", "self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData:", "TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk import sys from vmtk import", "renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation ==", "inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data array'],", "## See LICENSE file for details. 
## This software is distributed WITHOUT ANY", "COMPATIBILITY import vtk import sys from vmtk import vmtkrenderer from vmtk import pypes", "mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e in self.Array.GetValueRange(0)] if self.InsideValue", "self.Actor = None self.ContourWidget = None self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue", "or cell data array'], ['InsideValue','inside','float',1,'','value with which the surface within the contour is", "= 1 self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour", "a closed contour on a surface and generate a new tag inside it')", "the array with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the", "= selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue < 0.0 and", "== 'edges': representation = 'wireframe' elif self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation)", "vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor)", "0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator()", "for more information. 
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL", "= 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData =", "['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff()", "0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion = 1", "vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface =", "def SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges':", "self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue:", "the contour is filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside the contour is", "outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag array", "['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data array'], ['InsideValue','inside','float',1,'','value with which", "Program: VMTK ## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date: $Date:", "generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the", 
"vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1,", "self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper()", "or (contourValue < 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <=", "self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation = 'edges' self.Actor = None self.ContourWidget", "after it is disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def", "== None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints())", "self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer", "selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: 
self.Surface.GetPointData().RemoveArray('SelectionFilter')", "= self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor)", "## Date: $Date: 2006/05/26 12:35:13 $ ## Version: $Revision: 1.9 $ ## Copyright", "file for details. ## This software is distributed WITHOUT ANY WARRANTY; without even", "self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable", "== 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation ==", "self.Array.GetTuple1(i) if (not contourValue < 0.0 and selectionValue < 0.0) or (contourValue <", "if self.Representation == 'surface': representation = 'edges' elif self.Representation == 'edges': representation =", "sys from vmtk import vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self):", "self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i)", "= vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface", "self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif 
representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self,", "self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0)", "None self.ContourWidget = None self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue = 1.0", "representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation", "self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName", "selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface =", "= pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if", "for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter =", "#!/usr/bin/env python ## Program: VMTK ## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python", "ANY 
WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR", "= vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer()", "contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag array already exists", "representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn()", "else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e in self.Array.GetValueRange(0)] if self.InsideValue >", "self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self,", "= 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion =", "vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()):", "def RepresentationCallback(self, obj): if not self.Actor: return if self.Representation == 'surface': representation =", "< 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 
0.0: self.Array.SetTuple1(i,self.InsideValue)", "== 'surface': representation = 'edges' elif self.Representation == 'edges': representation = 'wireframe' elif", "or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue)", "from vmtk import vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self)", "triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array ==", "mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e", "## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date: $Date: 2006/05/26 12:35:13", "self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array", "input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data array'], ['InsideValue','inside','float',1,'','value with which the", "selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue < 0.0 and selectionValue", "contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start 
interaction',self.InteractCallback) self.vmtkRenderer.Render() if", "data array'], ['InsideValue','inside','float',1,'','value with which the surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value", "self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1", "None self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation = 'edges' self.Actor = None", "else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName)", "self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main = pypes.pypeMain()", "self.Surface = None self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation = 'edges' self.Actor", "again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface ==", "representation = 'edges' elif self.Representation == 'edges': representation = 'wireframe' elif self.Representation ==", "1 self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on", "self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate 
scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback)", "of the self.Array where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest", "= vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array)", "pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn()", "vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in", "self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe()", "contourValue = self.Array.GetTuple1(i) if (not contourValue < 0.0 and selectionValue < 0.0) or", "Py2-3 COMPATIBILITY import vtk import sys from vmtk import vmtkrenderer from vmtk import", "FOR A PARTICULAR ## PURPOSE. 
See the above copyright notices for more information.", "= vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback)", "python ## Program: VMTK ## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ##", "surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag", "the surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside", "contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified()", "'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj): if not self.Actor: return", "= selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface", "self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main = pypes.pypeMain() main.Arguments = sys.argv main.Execute()", "the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also 
when a tag array already", "points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point =", "is distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY", "= self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue =", "Copyright (c) <NAME>, <NAME>. All rights reserved. ## See LICENSE file for details.", "ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds())", "with which the surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also", "import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import", "self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj): if not self.Actor: return if self.Representation", "self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1)", "= vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId)", "'edges': representation = 'wireframe' elif self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render()", "self.OwnRenderer = 0 self.Representation = 'edges' self.Actor = None self.ContourWidget = None self.Interpolator", "if selectionValue <= 0.0: 
self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self,", "MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. See the above copyright notices", "surface and generate a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle", "representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds =", "pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId", "['InsideValue','inside','float',1,'','value with which the surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value with which", "obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for", "= self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else:", "0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if", "elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() 
self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj): if", "vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer =", "## This software is distributed WITHOUT ANY WARRANTY; without even ## the implied", "## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE.", "Date: $Date: 2006/05/26 12:35:13 $ ## Version: $Revision: 1.9 $ ## Copyright (c)", "None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on a surface and generate a new", "for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if", "selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue", "self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion", "## Copyright (c) <NAME>, <NAME>. All rights reserved. ## See LICENSE file for", "DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable the widget, but immediately", "reserved. ## See LICENSE file for details. ## This software is distributed WITHOUT", "warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. 
See the above", "immediately after it is disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1)", "self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize()", "it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data array'], ['InsideValue','inside','float',1,'','value", "import sys from vmtk import vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def", "filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag array already exists in the", "selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData:", "self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points =", "def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable the widget, but", "self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance", "with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ 
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter']", "'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds)", "1 self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a", "else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None: self.PrintError('Error: no Surface.') if not", "self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput()", "BUG: enable the widget, but immediately after it is disabled again if self.ContourWidget.GetEnabled()", "if self.Array == None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells())", "__future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY", "a surface and generate a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],", "self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if", "#NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk import", "None self.OwnRenderer = 0 self.Representation = 'edges' self.Actor = None self.ContourWidget = None", "mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: 
self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange", "self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]:", "= pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if", "if not self.Actor: return if self.Representation == 'surface': representation = 'edges' elif self.Representation", "pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance:", "self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points", "rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start", "not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter()", "if self.CellData: 
self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper", "= self.Array.GetTuple1(i) if (not contourValue < 0.0 and selectionValue < 0.0) or (contourValue", "< 0.0) or (contourValue < 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if", "= vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i", "arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor()", "the widget, but immediately after it is disabled again if self.ContourWidget.GetEnabled() == 1:", "if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars)", "self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer:", "pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\")", 
"self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj): if not self.Actor: return if", "self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable the", "def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation", "= 'wireframe' elif self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self,", "<NAME>, <NAME>. All rights reserved. ## See LICENSE file for details. ## This", "= self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper)", "above copyright notices for more information. 
from __future__ import absolute_import #NEEDS TO STAY", "0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface)", "AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk import sys from vmtk", "self.Surface == None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize()", "= 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on a surface", "VMTK ## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date: $Date: 2006/05/26", "elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor)", "mapper.SetScalarModeToUsePointData() arrayRange = [e for e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1]", "self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if", "for e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue", "0.0 and selectionValue < 0.0) or (contourValue < 0.0 and selectionValue < contourValue):", "if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() 
elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif", "pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for", "software is distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of", "['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the distance", "self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or", "elif self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep", "self.Representation == 'edges': representation = 'wireframe' elif self.Representation == 'wireframe': representation = 'surface'", "and selectionValue < 0.0) or (contourValue < 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue)", "< contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter')", "0 self.Representation = 'edges' self.Actor = None self.ContourWidget = None self.Interpolator = None", "self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if", "self.CellData: 
self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper =", "e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue <", "## Version: $Revision: 1.9 $ ## Copyright (c) <NAME>, <NAME>. All rights reserved.", "selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface)", "LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk import sys from vmtk import vmtkrenderer", "selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars", "points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update()", "self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = 
pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter')", "triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName)", "= vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in", "tag array already exists in the input surface'], ['ArrayName','array','str',1,'','the name of the self.Array", "$ ## Copyright (c) <NAME>, <NAME>. All rights reserved. ## See LICENSE file", "without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR", "array with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output", "vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point)", "in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]:", "= vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing", "TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk import sys", "the smallest or largest region'], 
['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the", "== 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation())", "contour is filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside the contour is filled'],", "= representation def RepresentationCallback(self, obj): if not self.Actor: return if self.Representation == 'surface':", "'wireframe' elif self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj):", "pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation = 'edges'", "self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on a surface and generate", "= vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else:", "1.9 $ ## Copyright (c) <NAME>, <NAME>. All rights reserved. 
## See LICENSE", "self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data array'], ['InsideValue','inside','float',1,'','value with", "mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for", "pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue =", "1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array", "FITNESS FOR A PARTICULAR ## PURPOSE. 
See the above copyright notices for more", "= 0 self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array =", "...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer:", "self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff()", "which the surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value with which the surface", "the above copyright notices for more information. from __future__ import absolute_import #NEEDS TO", "['OutsideValue','outside','float',1,'','value with which the surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value", "selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter =", "is filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite", "the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. 
See", "self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue: self.Array =", "= None self.ContourWidget = None self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue =", "[e for e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif", "import vtk import sys from vmtk import vmtkrenderer from vmtk import pypes class", "= None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on a surface and generate a", "self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array)", "vtk import sys from vmtk import vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript):", "point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion()", "self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else:", "self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() 
triangleFilter.SetInputData(self.Surface)", "Execute(self): if self.Surface == None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer =", "selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update()", "contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if", "self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): #", "already exists in the input surface'], ['ArrayName','array','str',1,'','the name of the self.Array where the", "selectionValue < 0.0) or (contourValue < 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else:", "= [e for e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue", "and generate a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing", "the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ])", "is disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): 
if", "outside value also when a tag array already exists in the input surface'],", "widget, but immediately after it is disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0)", "RepresentationCallback(self, obj): if not self.Actor: return if self.Representation == 'surface': representation = 'edges'", "tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data", "self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue < 0.0 and selectionValue < 0.0)", "vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array", "See the above copyright notices for more information. from __future__ import absolute_import #NEEDS", "not self.Actor: return if self.Representation == 'surface': representation = 'edges' elif self.Representation ==", "= 1 self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw", "0 self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array = None", "A PARTICULAR ## PURPOSE. See the above copyright notices for more information. from", "= None self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance =", "filled'], ['OutsideValue','outside','float',1,'','value with which the surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside", "with which the surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value with which the", "$Revision: 1.9 $ ## Copyright (c) <NAME>, <NAME>. All rights reserved. ## See", "of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. 
See the above copyright", "elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation", "surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if", "['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([", "self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate()", "from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3", "for details. 
## This software is distributed WITHOUT ANY WARRANTY; without even ##", "enable the widget, but immediately after it is disabled again if self.ContourWidget.GetEnabled() ==", "self.Array == None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else:", "= self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if", "Version: $Revision: 1.9 $ ## Copyright (c) <NAME>, <NAME>. All rights reserved. ##", "array'], ['InsideValue','inside','float',1,'','value with which the surface within the contour is filled'], ['OutsideValue','outside','float',1,'','value with", "= None self.OwnRenderer = 0 self.Representation = 'edges' self.Actor = None self.ContourWidget =", "in the input surface'], ['ArrayName','array','str',1,'','the name of the self.Array where the generated scalars", "self.Actor: return if self.Representation == 'surface': representation = 'edges' elif self.Representation == 'edges':", "generate a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point", "> arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor", "triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName)", "arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget()", "notices for more 
information. from __future__ import absolute_import #NEEDS TO STAY AS TOP", "PURPOSE. See the above copyright notices for more information. from __future__ import absolute_import", "self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData()", "= 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList()", "self.Representation = 'edges' self.Actor = None self.ContourWidget = None self.Interpolator = None self.OutsideValue", "a tag array already exists in the input surface'], ['ArrayName','array','str',1,'','the name of the", "## Program: VMTK ## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date:", "self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion()", "$Date: 2006/05/26 12:35:13 $ ## Version: $Revision: 1.9 $ ## Copyright (c) <NAME>,", "'edges' elif self.Representation == 'edges': representation = 'wireframe' elif self.Representation == 'wireframe': representation", "if (not contourValue < 0.0 and selectionValue < 0.0) or (contourValue < 0.0", "representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) 
#self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__':", "#self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main = pypes.pypeMain() main.Arguments =", "self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0]", "['CellData','celldata','bool',1,'','toggle writing point or cell data array'], ['InsideValue','inside','float',1,'','value with which the surface within", "def Execute(self): if self.Surface == None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer", "or FITNESS FOR A PARTICULAR ## PURPOSE. See the above copyright notices for", "self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing')", "and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData:", "are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with", "distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def", "more information. 
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE", "representation def RepresentationCallback(self, obj): if not self.Actor: return if self.Representation == 'surface': representation", "self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData:", "<= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize()", "MODULE FOR Py2-3 COMPATIBILITY import vtk import sys from vmtk import vmtkrenderer from", "PARTICULAR ## PURPOSE. See the above copyright notices for more information. from __future__", "vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator", "contour on a surface and generate a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the", "pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change", "This software is distributed WITHOUT ANY WARRANTY; without even ## the implied warranty", "'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') 
self.SetScriptDoc('draw a closed contour on a surface and", "rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback)", "but immediately after it is disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else:", "self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars =", "= 'edges' self.Actor = None self.ContourWidget = None self.Interpolator = None self.OutsideValue =", "InteractCallback(self, obj): # BUG: enable the widget, but immediately after it is disabled", "]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation == 'surface':", "if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None:", "vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour", "self.Representation = representation def RepresentationCallback(self, obj): if not self.Actor: return if self.Representation ==", "elif 
self.Representation == 'edges': representation = 'wireframe' elif self.Representation == 'wireframe': representation =", "self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main", "from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer", "which the surface outside the contour is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when", "1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData = 1", "arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor =", "self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2,", "'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds", "vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date: $Date: 2006/05/26 12:35:13 $ ## Version:", "0 self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName = 'CellEntityIds'", "self.InsideValue < arrayRange[0]: arrayRange[0] = 
self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render()", "= vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer)", "self.CellData = 1 self.ArrayName = 'CellEntityIds' self.Array = None self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed", "pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface", "== 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation", "self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e in self.Array.GetValueRange(0)] if", "= 0 self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData = 1 self.ArrayName =", "on a surface and generate a new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input", "the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ 
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation):", "implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ## PURPOSE. See the", "vmtk import vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface", "self.ContourWidget = None self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue", "selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn()", "if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e for e in", "if self.Surface == None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer()", "name of the self.Array where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the", "self.InsideValue = 1.0 self.OverwriteOutsideValue = 0 self.ComputeDistance = 0 self.TagSmallestRegion = 1 self.CellData", "STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk import sys from", "import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None", "representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() self.Representation = representation def RepresentationCallback(self, obj): if not", 
"in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface)", "self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate", "tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to", "absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY import vtk", "pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn()", "self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter", "selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if", "vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator) self.ContourWidget.EnabledOn() 
self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete", "self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG:", "if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter =", "is filled'], ['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag array already exists in", "self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer =", "__init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None self.OwnRenderer = 0 self.Representation =", "points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter", "= None self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue =", "Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language: Python ## Date: $Date: 2006/05/26 12:35:13 $", "return if self.Representation == 'surface': representation = 'edges' elif self.Representation == 'edges': representation", "self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: 
self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData:", "self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1])", "'edges' self.Actor = None self.ContourWidget = None self.Interpolator = None self.OutsideValue = 0.0", "selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter')", "vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else:", "vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None", "triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array =", "arrayRange = [e for e in self.Array.GetValueRange(0)] if self.InsideValue > arrayRange[1]: arrayRange[1] =", "self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: 
self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if", "information. from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR", "rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator() self.Interpolator.GetPolys().AddItem(self.Surface) rep.SetLineInterpolator(self.Interpolator)", "vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface = None self.vmtkRenderer = None self.OwnRenderer = 0", "0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def", "self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface)", "= 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData:", "representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe': self.Actor.GetProperty().SetRepresentationToWireframe() self.Actor.GetProperty().EdgeVisibilityOff() 
self.Representation =", "= vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars =", "== 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None: self.PrintError('Error: no", "if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars", "## Language: Python ## Date: $Date: 2006/05/26 12:35:13 $ ## Version: $Revision: 1.9", "= 'edges' elif self.Representation == 'edges': representation = 'wireframe' elif self.Representation == 'wireframe':", "array already exists in the input surface'], ['ArrayName','array','str',1,'','the name of the self.Array where", "== None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer", "else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray()", "i in range(self.Array.GetNumberOfTuples()): selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not", "def InteractCallback(self, obj): # BUG: enable the widget, but immediately after it is", "(not contourValue < 0.0 and selectionValue < 0.0) or (contourValue < 0.0 and", "i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = 
vtk.vtkSelectPolyData()", "'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOn() elif representation == 'wireframe':", "pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()):", "self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def ScalarsCallback(self, obj): rep =", "## PURPOSE. See the above copyright notices for more information. from __future__ import", "it is disabled again if self.ContourWidget.GetEnabled() == 1: self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self):", "WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS", "self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update() self.Surface = triangleFilter.GetOutput() if self.CellData: self.Array =", "vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange = [e", "self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface()", "def ScalarsCallback(self, obj): rep = 
vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) pointIds = vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints()", "surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation", "None self.Interpolator = None self.OutsideValue = 0.0 self.InsideValue = 1.0 self.OverwriteOutsideValue = 0", "no Surface.') if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self)", "self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable the widget, but immediately after it", "['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag array already exists in the input", "self.Array where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest", "vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion: selectionFilter.SetSelectionModeToSmallestRegion() else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars()", "['ArrayName','array','str',1,'','the name of the self.Array where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging", "writing point or cell data array'], ['InsideValue','inside','float',1,'','value with which the surface within the", "cell data array'], ['InsideValue','inside','float',1,'','value with which the surface within the contour is filled'],", "$ ## Language: Python ## Date: $Date: 2006/05/26 12:35:13 $ ## Version: 
$Revision:", "rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn() rep.SetPointPlacer(pointPlacer) self.Interpolator =", "None or self.OverwriteOutsideValue: self.Array = vtk.vtkDoubleArray() self.Array.SetNumberOfComponents(1) if self.CellData: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells()) else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName)", "exists in the input surface'], ['ArrayName','array','str',1,'','the name of the self.Array where the generated", "SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation == 'edges': self.Actor.GetProperty().SetRepresentationToSurface()", "value also when a tag array already exists in the input surface'], ['ArrayName','array','str',1,'','the", "See LICENSE file for details. 
## This software is distributed WITHOUT ANY WARRANTY;", "if self.CellData: self.Array = self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None", "self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render()", "self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(self.Surface) triangleFilter.Update()", "self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep", "the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill", "distributed WITHOUT ANY WARRANTY; without even ## the implied warranty of MERCHANTABILITY or", "rights reserved. ## See LICENSE file for details. 
## This software is distributed", "import vmtkrenderer from vmtk import pypes class vmtkSurfaceRegionDrawing(pypes.pypeScript): def __init__(self): pypes.pypeScript.__init__(self) self.Surface =", "0.0) or (contourValue < 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue", "self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj): # BUG: enable the widget,", "= 0 self.Representation = 'edges' self.Actor = None self.ContourWidget = None self.Interpolator =", "= vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation())", "WARRANTY; without even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A", "or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external", "self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point", "= vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId =", "surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell data array'], ['InsideValue','inside','float',1,'','value with which the surface", "pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = 
pointDataToCellDataFilter.GetPolyDataOutput() selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter') for i in range(self.Array.GetNumberOfTuples()): selectionValue", "else: selectionFilter.SetSelectionModeToLargestRegion() selectionFilter.Update() selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars() selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData()", "self.ContourWidget.EnabledOn() self.InputInfo(\"Drawing contour ...\\n\") self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback) self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback)", "where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'],", "]) def SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif representation ==", "new tag inside it') self.SetInputMembers([ ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'], ['CellData','celldata','bool',1,'','toggle writing point or cell", "largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer']", "selectionScalars.SetName('SelectionFilter') if self.CellData: self.Surface.GetPointData().AddArray(selectionScalars) pointDataToCellDataFilter = vtk.vtkPointDataToCellData() pointDataToCellDataFilter.SetInputData(self.Surface) pointDataToCellDataFilter.PassPointDataOn() pointDataToCellDataFilter.Update() self.Surface = 
pointDataToCellDataFilter.GetPolyDataOutput()", "= self.Surface.GetCellData().GetArray(self.ArrayName) else: self.Array = self.Surface.GetPointData().GetArray(self.ArrayName) if self.Array == None or self.OverwriteOutsideValue: self.Array", "scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array", "self.SetScriptName('vmtksurfaceregiondrawing') self.SetScriptDoc('draw a closed contour on a surface and generate a new tag", "scalars',self.ScalarsCallback) self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main =", "range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points)", "input surface'], ['ArrayName','array','str',1,'','the name of the self.Array where the generated scalars are stored'],", "self.SetScriptDoc('draw a closed contour on a surface and generate a new tag inside", "if self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] =", "self.InsideValue > arrayRange[1]: arrayRange[1] = self.InsideValue elif self.InsideValue < arrayRange[0]: arrayRange[0] = self.InsideValue", "Surface.') if not self.vmtkRenderer: self.vmtkRenderer = vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer = 1 self.vmtkRenderer.RegisterScript(self) triangleFilter", "if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) 
mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName)", "<reponame>michelebucelli/vmtk<filename>vmtkScripts/vmtksurfaceregiondrawing.py #!/usr/bin/env python ## Program: VMTK ## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $ ## Language:", "'surface': representation = 'edges' elif self.Representation == 'edges': representation = 'wireframe' elif self.Representation", "even ## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ##", "obj): # BUG: enable the widget, but immediately after it is disabled again", "= vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface) mapper.ScalarVisibilityOn() if self.CellData: self.Surface.GetCellData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUseCellData() else: self.Surface.GetPointData().SetActiveScalars(self.ArrayName) mapper.SetScalarModeToUsePointData() arrayRange =", "pointIds.GetId(i) point = self.Surface.GetPoint(pointId) points.SetPoint(i,point) selectionFilter = vtk.vtkSelectPolyData() selectionFilter.SetInputData(self.Surface) selectionFilter.SetLoop(points) selectionFilter.GenerateSelectionScalarsOn() if self.TagSmallestRegion:", "when a tag array already exists in the input surface'], ['ArrayName','array','str',1,'','the name of", "rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0) rep.GetLinesProperty().SetLineWidth(3.0) pointPlacer = vtk.vtkPolygonalSurfacePointPlacer() pointPlacer.AddProp(self.Actor) pointPlacer.GetPolys().AddItem(self.Surface) pointPlacer.SnapToClosestPointOn()", "else: if selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def", "None: self.PrintError('Error: no Surface.') if not self.vmtkRenderer: self.vmtkRenderer = 
vmtkrenderer.vmtkRenderer() self.vmtkRenderer.Initialize() self.OwnRenderer =", "selectionValue = selectionScalars.GetTuple1(i) if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue < 0.0", "vtk.vtkIdList() self.Interpolator.GetContourPointIds(rep,pointIds) points = vtk.vtkPoints() points.SetNumberOfPoints(pointIds.GetNumberOfIds()) for i in range(pointIds.GetNumberOfIds()): pointId = pointIds.GetId(i)", "self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation()) rep.GetLinesProperty().SetColor(1, 0.2, 0)", "arrayRange[0]: arrayRange[0] = self.InsideValue self.Actor = vtk.vtkActor() self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget =", "contour',self.DeleteContourCallback) #self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main = pypes.pypeMain() main.Arguments", "12:35:13 $ ## Version: $Revision: 1.9 $ ## Copyright (c) <NAME>, <NAME>. 
All", "the self.Array where the generated scalars are stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or", "if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj): self.ContourWidget.Initialize() def InteractCallback(self, obj):", "output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation == 'surface': self.Actor.GetProperty().SetRepresentationToSurface() self.Actor.GetProperty().EdgeVisibilityOff() elif", "LICENSE file for details. ## This software is distributed WITHOUT ANY WARRANTY; without", "(contourValue < 0.0 and selectionValue < contourValue): self.Array.SetTuple1(i,selectionValue) else: if selectionValue <= 0.0:", "details. ## This software is distributed WITHOUT ANY WARRANTY; without even ## the", "selectionValue <= 0.0: self.Array.SetTuple1(i,self.InsideValue) if self.CellData: self.Surface.GetPointData().RemoveArray('SelectionFilter') self.Surface.GetCellData().RemoveArray('SelectionFilter') self.Surface.Modified() self.ContourWidget.Initialize() def DeleteContourCallback(self, obj):", "stored'], ['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the", "smallest or largest region'], ['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the contour'],", "else: self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints()) self.Array.SetName(self.ArrayName) self.Array.FillComponent(0,self.OutsideValue) if self.CellData: self.Surface.GetCellData().AddArray(self.Array) else: self.Surface.GetPointData().AddArray(self.Array) mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(self.Surface)", "self.Actor = vtk.vtkActor() 
self.Actor.SetMapper(mapper) self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1]) self.vmtkRenderer.Renderer.AddActor(self.Actor) self.vmtkRenderer.Render() self.ContourWidget = vtk.vtkContourWidget() self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor) rep =", "self.ContourWidget.SetEnabled(0) else: self.ContourWidget.SetEnabled(1) def Execute(self): if self.Surface == None: self.PrintError('Error: no Surface.') if", "interaction',self.InteractCallback) self.vmtkRenderer.Render() if self.OwnRenderer: self.vmtkRenderer.Deallocate() if __name__=='__main__': main = pypes.pypeMain() main.Arguments = sys.argv", "to the contour'], ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self,", "['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'] ]) self.SetOutputMembers([ ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'] ]) def SetSurfaceRepresentation(self, representation): if representation", "if self.ComputeDistance: contourValue = self.Array.GetTuple1(i) if (not contourValue < 0.0 and selectionValue <", "# BUG: enable the widget, but immediately after it is disabled again if", "FOR Py2-3 COMPATIBILITY import vtk import sys from vmtk import vmtkrenderer from vmtk", "representation = 'wireframe' elif self.Representation == 'wireframe': representation = 'surface' self.SetSurfaceRepresentation(representation) self.vmtkRenderer.RenderWindow.Render() def" ]
[ "def harness(setting, Model, args): opts = poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif", "harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not", "= torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"})", "Graphcore Ltd. All rights reserved. import pytest import torch import poptorch def harness(setting,", "\"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y): return x /", "x > y x = torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10))", "y x = torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model,", "test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting,", "[x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on", "\"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y): return x / y", "#!/usr/bin/env python3 # Copyright (c) 2020 Graphcore Ltd. All rights reserved. 
import pytest", "def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y): return x * y x", "point exception not supported on model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x):", "torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not", "def forward(self, x, y): return x / y x = torch.ones(10, 10) y", "exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module):", "test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x = torch.Tensor([3800, 4203]) harness(setting,", "y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\",", "x = torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x,", "10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),", "@pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y): return", "= torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y])", "torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not", "\"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y): return x * y", "10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on", "Model(torch.nn.Module): def 
forward(self, x, y): return x / y x = torch.ones(10, 10)", "Model(torch.nn.Module): def forward(self, x, y): return x * y x = torch.zeros(10, 10)", "opts) if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating", "def forward(self, x, y): return x * y x = torch.zeros(10, 10) y", "\"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_nan(setting):", "= torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point", "model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y):", "point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class", "torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating", "point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class", "not supported on model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return torch.exp(x)", "supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self,", "= torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),", "10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not 
poptorch.ipuHardwareIsAvailable(), reason=\"Floating point", "{\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x) x", "= torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating", "test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y): return x / y x =", "y): return x > y x = torch.ones(10, 10) y = torch.div(torch.zeros(10, 10),", "y x = torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting, Model, [x, y])", "elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting == \"true\":", "poptorch_model = poptorch.inferenceModel(Model(), opts) if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args)", "Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported", "= torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y])", "(c) 2020 Graphcore Ltd. All rights reserved. 
import pytest import torch import poptorch", "opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting ==", "/ y x = torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting, Model, [x,", "y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on", "torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception", "model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return", "exception not supported on model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return", "10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),", "forward(self, x, y): return x > y x = torch.ones(10, 10) y =", "model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y):", "torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\",", "def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x = torch.Tensor([3800, 4203])", "on model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x =", "[x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\",", "{\"default\", \"true\", \"false\"}) def 
test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y): return x", "import torch import poptorch def harness(setting, Model, args): opts = poptorch.Options() if setting", "\"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args)", "\"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting", "10) y = torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point", "class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting, Model,", "import poptorch def harness(setting, Model, args): opts = poptorch.Options() if setting == \"true\":", "\"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_nan(setting): class", "Model, args): opts = poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting ==", "x * y x = torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10))", "\"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x) x =", "reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting):", "y): return x / y x = torch.ones(10, 10) y = torch.zeros(10, 10)", "-2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception", "\"false\"}) @pytest.mark.skipif(not 
poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_ovf(setting): class", "= poptorch.inferenceModel(Model(), opts) if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not", "\"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_ovf(setting):", "@pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\")", "Copyright (c) 2020 Graphcore Ltd. All rights reserved. import pytest import torch import", "forward(self, x, y): return x * y x = torch.zeros(10, 10) y =", "torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating", "poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def", "reserved. import pytest import torch import poptorch def harness(setting, Model, args): opts =", "import pytest import torch import poptorch def harness(setting, Model, args): opts = poptorch.Options()", "supported on model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x", "exception not supported on model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y):", "rights reserved. 
import pytest import torch import poptorch def harness(setting, Model, args): opts", "x, y): return x > y x = torch.ones(10, 10) y = torch.div(torch.zeros(10,", "forward(self, x): return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\",", "torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported", "x = torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not", "poptorch def harness(setting, Model, args): opts = poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True)", "return x / y x = torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting,", "setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts)", "poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\",", "@pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y): return", "on model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y): return x >", "setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting == \"true\": with", "test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y): return x > y x =", "supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self,", "{\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not 
poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def", "model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x = torch.Tensor([3800,", "x / y x = torch.ones(10, 10) y = torch.zeros(10, 10) harness(setting, Model,", "def test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y): return x > y x", "class Model(torch.nn.Module): def forward(self, x, y): return x > y x = torch.ones(10,", "Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x = torch.Tensor([3800, 4203]) harness(setting, Model, [x])", "return x > y x = torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10,", "opts = poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False)", "y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\",", "# Copyright (c) 2020 Graphcore Ltd. All rights reserved. 
import pytest import torch", "if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point", "pytest import torch import poptorch def harness(setting, Model, args): opts = poptorch.Options() if", "supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self,", "Model(torch.nn.Module): def forward(self, x, y): return x > y x = torch.ones(10, 10)", "harness(setting, Model, args): opts = poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting", "x, y): return x * y x = torch.zeros(10, 10) y = torch.div(torch.ones(10,", "with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on", "y): return x * y x = torch.zeros(10, 10) y = torch.div(torch.ones(10, 10),", "reason=\"Floating point exception not supported on model\") def test_ovf(setting): class Model(torch.nn.Module): def forward(self,", "test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y): return x * y x =", "reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting):", "on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x):", "exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module):", "harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), 
reason=\"Floating point exception", "x = torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x,", "= poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model", "x): return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\",", "\"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported", "== \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting == \"true\": with pytest.raises(poptorch.Error):", "class Model(torch.nn.Module): def forward(self, x, y): return x * y x = torch.zeros(10,", "{\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y): return x", "def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x) x = torch.Tensor([-1, -2])", "\"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x, y): return x *", "y x = torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model,", "return x * y x = torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10,", "return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"})", "> y x = torch.ones(10, 10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting,", "poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", 
\"true\",", "= torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not", "def test_div0(setting): class Model(torch.nn.Module): def forward(self, x, y): return x / y x", "y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating", "class Model(torch.nn.Module): def forward(self, x): return torch.exp(x) x = torch.Tensor([3800, 4203]) harness(setting, Model,", "Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not", "poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model =", "x = torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),", "not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def", "\"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x) x = torch.Tensor([-1,", "10) y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.parametrize(\"setting\", {\"default\",", "== \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not", "@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_nan(setting): class Model(torch.nn.Module):", "Model(torch.nn.Module): def forward(self, 
x): return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting, Model, [x])", "python3 # Copyright (c) 2020 Graphcore Ltd. All rights reserved. import pytest import", "y = torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception", "2020 Graphcore Ltd. All rights reserved. import pytest import torch import poptorch def", "* y x = torch.zeros(10, 10) y = torch.div(torch.ones(10, 10), torch.zeros(10, 10)) harness(setting,", "@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"})", "args): opts = poptorch.Options() if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\":", "point exception not supported on model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self, x,", "x, y): return x / y x = torch.ones(10, 10) y = torch.zeros(10,", "10), torch.zeros(10, 10)) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not", "not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def", "exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module):", "poptorch.inferenceModel(Model(), opts) if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),", "poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_ovf(setting): class Model(torch.nn.Module): def", "not supported on model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y): 
return", "poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_nan(setting): class Model(torch.nn.Module): def", "== \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if", "reason=\"Floating point exception not supported on model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self,", "torch import poptorch def harness(setting, Model, args): opts = poptorch.Options() if setting ==", "@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") def test_ovf(setting): class Model(torch.nn.Module):", "[x, y]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported", "supported on model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y): return x", "else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\",", "pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\")", "10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on", "on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class Model(torch.nn.Module): def forward(self, x,", "reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting):", "torch.sqrt(x) x = 
torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not", "@pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_nonreal(setting): class Model(torch.nn.Module): def forward(self, x): return torch.sqrt(x)", "def forward(self, x, y): return x > y x = torch.ones(10, 10) y", "if setting == \"true\": opts.Precision.enableFloatingPointExceptions(True) elif setting == \"false\": opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(),", "forward(self, x, y): return x / y x = torch.ones(10, 10) y =", "torch.zeros(10, 10) harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported", "on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def forward(self, x,", "setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else: poptorch_model(*args) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception", "class Model(torch.nn.Module): def forward(self, x, y): return x / y x = torch.ones(10,", "Ltd. All rights reserved. import pytest import torch import poptorch def harness(setting, Model,", "torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point", "opts.Precision.enableFloatingPointExceptions(False) poptorch_model = poptorch.inferenceModel(Model(), opts) if setting == \"true\": with pytest.raises(poptorch.Error): poptorch_model(*args) else:", "Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\") @pytest.mark.parametrize(\"setting\",", "All rights reserved. 
import pytest import torch import poptorch def harness(setting, Model, args):", "point exception not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_mul0inf(setting): class", "harness(setting, Model, [x, y]) @pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(), reason=\"Floating point exception not supported on model\")", "model\") def test_nan(setting): class Model(torch.nn.Module): def forward(self, x, y): return x > y", "def forward(self, x): return torch.sqrt(x) x = torch.Tensor([-1, -2]) harness(setting, Model, [x]) @pytest.mark.parametrize(\"setting\",", "<gh_stars>100-1000 #!/usr/bin/env python3 # Copyright (c) 2020 Graphcore Ltd. All rights reserved. import", "not supported on model\") @pytest.mark.parametrize(\"setting\", {\"default\", \"true\", \"false\"}) def test_div0(setting): class Model(torch.nn.Module): def" ]
[ "print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows() except Exception as e: print(\"[ERR] \",", "''' Created on Saturday, 3rd October 2020 8:13::01 pm @author: mtc-20 Coded on", "= cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1)", "cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret, frame = cap.read() # frame =", "frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256 ==27:", "# Load ORB, BF and model objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2,", "BF and model objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True)", "= cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows() except", "ret, frame = cap.read() # frame = cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame,", "np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF", "cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and model objects", "display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and model", "cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model, des_model = orb.detectAndCompute(model, None) try: #", "Load ORB, BF and model objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck", "3rd October 2020 8:13::01 pm @author: mtc-20 Coded on VS Code 2019 ------", "model objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model =", "m in enumerate(matches): if index < len(matches) - 1 and m.distance < 0.75", "model = 
cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model, des_model = orb.detectAndCompute(model, None)", "index, m in enumerate(matches): if index < len(matches) - 1 and m.distance <", "cv2.resizeWindow('Result', 640,480) while True: ret, frame = cap.read() # frame = cv2.flip(frame, 1)", "2020 ''' import cv2 import numpy as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0)", "break cap.release() cv2.destroyAllWindows() except Exception as e: print(\"[ERR] \", e) print(\"[INFO] Closing...\") cap.release()", "0.75 * matches[index+1].distance: good.append(m) if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame) k", "0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced", "= orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good = [] for index, m", "= [] for index, m in enumerate(matches): if index < len(matches) - 1", "2020 8:13::01 pm @author: mtc-20 Coded on VS Code 2019 ------ Overview: ------", "Sat Oct 03 2020 ''' import cv2 import numpy as np def display(frame):", "good = [] for index, m in enumerate(matches): if index < len(matches) -", "cv2.destroyAllWindows() # Load ORB, BF and model objects orb = cv2.ORB_create() bf =", "frame = cap.read() # frame = cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None)", "1 and m.distance < 0.75 * matches[index+1].distance: good.append(m) if len(good) < 5: #", "Saturday, 3rd October 2020 8:13::01 pm @author: mtc-20 Coded on VS Code 2019", "des_model = orb.detectAndCompute(model, None) try: # Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL)", "induced exit...\") break cap.release() cv2.destroyAllWindows() except Exception as e: print(\"[ERR] \", e) print(\"[INFO]", "= cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) 
cv2.imshow(\"Result\",", "Coded on VS Code 2019 ------ Overview: ------ Last Modified: Sat Oct 03", "k%256 ==27: print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows() except Exception as e:", "cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and model objects orb =", "m.distance < 0.75 * matches[index+1].distance: good.append(m) if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\",", "if index < len(matches) - 1 and m.distance < 0.75 * matches[index+1].distance: good.append(m)", "==27: print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows() except Exception as e: print(\"[ERR]", "def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and", "if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and model objects orb = cv2.ORB_create()", "cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out)", "matches[index+1].distance: good.append(m) if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1)", "frame) k = cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0,", "out) k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced exit...\") break cap.release()", "while True: ret, frame = cap.read() # frame = cv2.flip(frame, 1) kp_frame, des_frame", "640,480) while True: ret, frame = cap.read() # frame = cv2.flip(frame, 1) kp_frame,", "len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out =", "8:13::01 pm @author: mtc-20 Coded on VS Code 2019 ------ Overview: ------ Last", "on VS Code 2019 ------ Overview: ------ Last Modified: Sat Oct 03 2020", "< 5: # print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out = cv2.drawMatches(model,", "True: ret, frame = cap.read() # frame = 
cv2.flip(frame, 1) kp_frame, des_frame =", "cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret, frame = cap.read()", "mtc-20 Coded on VS Code 2019 ------ Overview: ------ Last Modified: Sat Oct", "kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO]", "[] for index, m in enumerate(matches): if index < len(matches) - 1 and", "out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k =", "if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out", "orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg') #", "matches = bf.match(des_model, des_frame) good = [] for index, m in enumerate(matches): if", "Modified: Sat Oct 03 2020 ''' import cv2 import numpy as np def", "VS Code 2019 ------ Overview: ------ Last Modified: Sat Oct 03 2020 '''", "cv2 import numpy as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows()", "flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced exit...\")", "cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if", "frame = cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame)", "Overview: ------ Last Modified: Sat Oct 03 2020 ''' import cv2 import numpy", "cap.release() cv2.destroyAllWindows() except Exception as e: print(\"[ERR] \", e) print(\"[INFO] Closing...\") cap.release() cv2.destroyAllWindows()", "good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) 
cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User", "index < len(matches) - 1 and m.distance < 0.75 * matches[index+1].distance: good.append(m) if", "cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret, frame = cap.read() # frame", "< len(matches) - 1 and m.distance < 0.75 * matches[index+1].distance: good.append(m) if len(good)", "1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good = []", "# Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret,", "= bf.match(des_model, des_frame) good = [] for index, m in enumerate(matches): if index", "bf.match(des_model, des_frame) good = [] for index, m in enumerate(matches): if index <", "- 1 and m.distance < 0.75 * matches[index+1].distance: good.append(m) if len(good) < 5:", "Detect and compute keypoints kp_model, des_model = orb.detectAndCompute(model, None) try: # Initialize camera", "and model objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model", "= cap.read() # frame = cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches", "# print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame,", "cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows() except Exception", "= True) model = cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model, des_model =", "Oct 03 2020 ''' import cv2 import numpy as np def display(frame): cv2.imshow(\"check\",", "ORB, BF and model objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck =", "objects orb = cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg')", "cv2.WINDOW_NORMAL) 
cv2.resizeWindow('Result', 640,480) while True: ret, frame = cap.read() # frame = cv2.flip(frame,", "Code 2019 ------ Overview: ------ Last Modified: Sat Oct 03 2020 ''' import", "@author: mtc-20 Coded on VS Code 2019 ------ Overview: ------ Last Modified: Sat", "and m.distance < 0.75 * matches[index+1].distance: good.append(m) if len(good) < 5: # print(len(good))", "Created on Saturday, 3rd October 2020 8:13::01 pm @author: mtc-20 Coded on VS", "exit...\") break cap.release() cv2.destroyAllWindows() except Exception as e: print(\"[ERR] \", e) print(\"[INFO] Closing...\")", "enumerate(matches): if index < len(matches) - 1 and m.distance < 0.75 * matches[index+1].distance:", "compute keypoints kp_model, des_model = orb.detectAndCompute(model, None) try: # Initialize camera cap =", "crossCheck = True) model = cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model, des_model", "2019 ------ Overview: ------ Last Modified: Sat Oct 03 2020 ''' import cv2", "03 2020 ''' import cv2 import numpy as np def display(frame): cv2.imshow(\"check\", frame)", "''' import cv2 import numpy as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if", "None) matches = bf.match(des_model, des_frame) good = [] for index, m in enumerate(matches):", "= cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg') # Detect and compute keypoints", "as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB,", "des_frame) good = [] for index, m in enumerate(matches): if index < len(matches)", "else: out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k", "len(matches) - 1 and m.distance < 0.75 * matches[index+1].distance: good.append(m) if len(good) <", "------ Last Modified: Sat Oct 03 2020 ''' import cv2 import numpy as", "User induced exit...\") break cap.release() 
cv2.destroyAllWindows() except Exception as e: print(\"[ERR] \", e)", "k = cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)", "------ Overview: ------ Last Modified: Sat Oct 03 2020 ''' import cv2 import", "import cv2 import numpy as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1):", "import numpy as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() #", "try: # Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True:", "pm @author: mtc-20 Coded on VS Code 2019 ------ Overview: ------ Last Modified:", "numpy as np def display(frame): cv2.imshow(\"check\", frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load", "cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and model objects orb = cv2.ORB_create() bf", "* matches[index+1].distance: good.append(m) if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame) k =", "= cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg') # Detect", "frame) cv2.waitKey(0) if cv2.waitKey(1): cv2.destroyAllWindows() # Load ORB, BF and model objects orb", "cap.read() # frame = cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches =", "Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret, frame", "bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg') # Detect and compute", "<gh_stars>0 ''' Created on Saturday, 3rd October 2020 8:13::01 pm @author: mtc-20 Coded", "if k%256 ==27: print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows() except Exception as", "keypoints kp_model, des_model = 
orb.detectAndCompute(model, None) try: # Initialize camera cap = cv2.VideoCapture(0)", "print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame, kp_frame,", "camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret, frame =", "= orb.detectAndCompute(model, None) try: # Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result',", "True) model = cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model, des_model = orb.detectAndCompute(model,", "and compute keypoints kp_model, des_model = orb.detectAndCompute(model, None) try: # Initialize camera cap", "Last Modified: Sat Oct 03 2020 ''' import cv2 import numpy as np", "cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model,", "None) try: # Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while", "orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good = [] for index, m in", "cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:],", "cv2.ORB_create() bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True) model = cv2.imread('./../index0_s.jpg') # Detect and", "des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good = [] for index,", "# Detect and compute keypoints kp_model, des_model = orb.detectAndCompute(model, None) try: # Initialize", "on Saturday, 3rd October 2020 8:13::01 pm @author: mtc-20 Coded on VS Code", "orb.detectAndCompute(model, None) try: # Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480)", "October 2020 8:13::01 pm @author: mtc-20 Coded on VS Code 2019 
------ Overview:", "k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced exit...\") break cap.release() cv2.destroyAllWindows()", "kp_model, des_model = orb.detectAndCompute(model, None) try: # Initialize camera cap = cv2.VideoCapture(0) cv2.namedWindow('Result',", "= cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good", "cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256 ==27: print(\"[INFO] User induced exit...\") break", "= cv2.imread('./../index0_s.jpg') # Detect and compute keypoints kp_model, des_model = orb.detectAndCompute(model, None) try:", "= cv2.VideoCapture(0) cv2.namedWindow('Result', cv2.WINDOW_NORMAL) cv2.resizeWindow('Result', 640,480) while True: ret, frame = cap.read() #", "< 0.75 * matches[index+1].distance: good.append(m) if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame)", "# frame = cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model,", "kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS) cv2.imshow(\"Result\", out) k = cv2.waitKey(1) if k%256", "for index, m in enumerate(matches): if index < len(matches) - 1 and m.distance", "cv2.flip(frame, 1) kp_frame, des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good =", "in enumerate(matches): if index < len(matches) - 1 and m.distance < 0.75 *", "kp_frame, des_frame = orb.detectAndCompute(frame, None) matches = bf.match(des_model, des_frame) good = [] for", "5: # print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else: out = cv2.drawMatches(model, kp_model,", "good.append(m) if len(good) < 5: # print(len(good)) cv2.imshow(\"Result\", frame) k = cv2.waitKey(1) else:" ]
[ "- start}\") g = jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start = time.perf_counter()", "- start}\") y = jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y) end =", "import time import jax import jax.numpy as jnp def f(x): return jnp.dot(x, x.T)", "start = time.perf_counter() g(y) end = time.perf_counter() total_time += end - start print(f\"total_time:", "start}\") y = jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter()", "end = time.perf_counter() print(f\"jit_time: {end - start}\") g = jax.vmap(f) y = jnp.ones((1000,", "jax.numpy as jnp def f(x): return jnp.dot(x, x.T) N_trials = 1 total_time =", "time.perf_counter() print(f\"jit_time: {end - start}\") y = jnp.ones((1001, 1000, 1)) start = time.perf_counter()", "jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end -", "start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") for i", "time import jax import jax.numpy as jnp def f(x): return jnp.dot(x, x.T) N_trials", "f(x): return jnp.dot(x, x.T) N_trials = 1 total_time = 0.0 x = jnp.ones((1000,", "= 0.0 x = jnp.ones((1000, 1)) f = jax.jit(f) start = time.perf_counter() f(x)", "end = time.perf_counter() print(f\"jit_time: {end - start}\") y = jnp.ones((1001, 1000, 1)) start", "g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") y = jnp.ones((1001, 1000, 1))", "total_time = 0.0 x = jnp.ones((1000, 1)) f = jax.jit(f) start = time.perf_counter()", "range(N_trials): start = time.perf_counter() g(y) end = time.perf_counter() total_time += end - start", "time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") for i in range(N_trials):", "{end - start}\") g = jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start =", "jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end -", "f = jax.jit(f) start = time.perf_counter() f(x) end = 
time.perf_counter() print(f\"jit_time: {end -", "= jax.jit(f) start = time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time: {end - start}\")", "print(f\"jit_time: {end - start}\") y = jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y)", "y = jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time:", "g = jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y) end", "= time.perf_counter() print(f\"jit_time: {end - start}\") g = jax.vmap(f) y = jnp.ones((1000, 1000,", "start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") y =", "<filename>architect/test_jit.py<gh_stars>1-10 import time import jax import jax.numpy as jnp def f(x): return jnp.dot(x,", "= 1 total_time = 0.0 x = jnp.ones((1000, 1)) f = jax.jit(f) start", "time.perf_counter() print(f\"jit_time: {end - start}\") for i in range(N_trials): start = time.perf_counter() g(y)", "x = jnp.ones((1000, 1)) f = jax.jit(f) start = time.perf_counter() f(x) end =", "print(f\"jit_time: {end - start}\") for i in range(N_trials): start = time.perf_counter() g(y) end", "= time.perf_counter() g(y) end = time.perf_counter() total_time += end - start print(f\"total_time: {total_time}\")", "jnp def f(x): return jnp.dot(x, x.T) N_trials = 1 total_time = 0.0 x", "= time.perf_counter() print(f\"jit_time: {end - start}\") for i in range(N_trials): start = time.perf_counter()", "{end - start}\") y = jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y) end", "= jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end", "in range(N_trials): start = time.perf_counter() g(y) end = time.perf_counter() total_time += end -", "= time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") y = jnp.ones((1001,", "= jnp.ones((1000, 1)) f = jax.jit(f) start = time.perf_counter() f(x) end = time.perf_counter()", "jax.jit(f) start = 
time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time: {end - start}\") g", "= time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time: {end - start}\") g = jax.vmap(f)", "1)) f = jax.jit(f) start = time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time: {end", "1 total_time = 0.0 x = jnp.ones((1000, 1)) f = jax.jit(f) start =", "jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter()", "jax import jax.numpy as jnp def f(x): return jnp.dot(x, x.T) N_trials = 1", "N_trials = 1 total_time = 0.0 x = jnp.ones((1000, 1)) f = jax.jit(f)", "1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") for", "jnp.ones((1000, 1)) f = jax.jit(f) start = time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time:", "import jax.numpy as jnp def f(x): return jnp.dot(x, x.T) N_trials = 1 total_time", "time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") y = jnp.ones((1001, 1000,", "i in range(N_trials): start = time.perf_counter() g(y) end = time.perf_counter() total_time += end", "y = jnp.ones((1001, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time:", "f(x) end = time.perf_counter() print(f\"jit_time: {end - start}\") g = jax.vmap(f) y =", "{end - start}\") for i in range(N_trials): start = time.perf_counter() g(y) end =", "0.0 x = jnp.ones((1000, 1)) f = jax.jit(f) start = time.perf_counter() f(x) end", "= time.perf_counter() print(f\"jit_time: {end - start}\") y = jnp.ones((1001, 1000, 1)) start =", "1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") y", "print(f\"jit_time: {end - start}\") g = jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start", "start}\") g = jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y)", "= jax.vmap(f) y = jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y) end =", "= 
time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") for i in", "time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time: {end - start}\") g = jax.vmap(f) y", "for i in range(N_trials): start = time.perf_counter() g(y) end = time.perf_counter() total_time +=", "1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\")", "time.perf_counter() print(f\"jit_time: {end - start}\") g = jax.vmap(f) y = jnp.ones((1000, 1000, 1))", "= jnp.ones((1000, 1000, 1)) start = time.perf_counter() g(y) end = time.perf_counter() print(f\"jit_time: {end", "jnp.dot(x, x.T) N_trials = 1 total_time = 0.0 x = jnp.ones((1000, 1)) f", "end = time.perf_counter() print(f\"jit_time: {end - start}\") for i in range(N_trials): start =", "as jnp def f(x): return jnp.dot(x, x.T) N_trials = 1 total_time = 0.0", "import jax import jax.numpy as jnp def f(x): return jnp.dot(x, x.T) N_trials =", "x.T) N_trials = 1 total_time = 0.0 x = jnp.ones((1000, 1)) f =", "- start}\") for i in range(N_trials): start = time.perf_counter() g(y) end = time.perf_counter()", "start = time.perf_counter() f(x) end = time.perf_counter() print(f\"jit_time: {end - start}\") g =", "def f(x): return jnp.dot(x, x.T) N_trials = 1 total_time = 0.0 x =", "start}\") for i in range(N_trials): start = time.perf_counter() g(y) end = time.perf_counter() total_time", "return jnp.dot(x, x.T) N_trials = 1 total_time = 0.0 x = jnp.ones((1000, 1))", "g(y) end = time.perf_counter() print(f\"jit_time: {end - start}\") for i in range(N_trials): start" ]
class Snode:
    """Node of the tree: keeps the svm classifier of the split and, when
    testing, the subset of the dataset assigned to it.

    Parameters
    ----------
    clf : SVC
        Classifier used
    X : np.ndarray
        input dataset in train time (only stored when the TESTING env var
        is set)
    y : np.ndarray
        labels of the dataset
    features : np.array
        features used to compute hyperplane
    impurity : float
        impurity of the node
    title : str
        label describing the route to the node
    weight : np.ndarray, optional
        sample weights applied to input dataset in train time, by default
        None (only stored when the TESTING env var is set)
    scaler : StandardScaler, optional
        scaler used if normalization is applied, by default None
    """

    def __init__(
        self,
        clf: SVC,
        X: np.ndarray,
        y: np.ndarray,
        features: np.array,
        impurity: float,
        title: str,
        weight: np.ndarray = None,
        scaler: StandardScaler = None,
    ):
        self._clf = clf
        self._title = title
        self._belief = 0.0
        # Only store dataset in Testing (TESTING env var set to anything
        # other than the "NS" sentinel); saves memory in production.
        self._X = X if os.environ.get("TESTING", "NS") != "NS" else None
        self._y = y
        self._down = None
        self._up = None
        self._class = None
        self._sample_weight = (
            weight if os.environ.get("TESTING", "NS") != "NS" else None
        )
        self._features = features
        self._impurity = impurity
        # -1 means "no partition column chosen / no gain" — see Splitter
        self._partition_column: int = -1
        self._scaler = scaler

    @classmethod
    def copy(cls, node: "Snode") -> "Snode":
        """Alternate constructor: shallow copy of another node's state."""
        return cls(
            node._clf,
            node._X,
            node._y,
            node._features,
            node._impurity,
            node._title,
            node._sample_weight,
            node._scaler,
        )

    def set_partition_column(self, col: int):
        # Column of the decision function chosen at train time
        self._partition_column = col

    def get_partition_column(self) -> int:
        return self._partition_column

    def set_down(self, son):
        # Child that receives the samples below the hyperplane
        self._down = son

    def set_title(self, title):
        self._title = title

    def set_classifier(self, clf):
        self._clf = clf

    def set_features(self, features):
        self._features = features

    def set_impurity(self, impurity):
        self._impurity = impurity

    def get_title(self) -> str:
        return self._title

    def get_classifier(self) -> SVC:
        return self._clf

    def get_impurity(self) -> float:
        return self._impurity

    def get_features(self) -> np.array:
        return self._features

    def set_up(self, son):
        # Child that receives the samples above the hyperplane
        self._up = son

    def is_leaf(self) -> bool:
        # A node with no children is a leaf
        return self._up is None and self._down is None

    def get_down(self) -> "Snode":
        return self._down

    def get_up(self) -> "Snode":
        return self._up

    def make_predictor(self):
        """Compute the class of the predictor and its belief based on the
        subdataset of the node only if it is a leaf

        Sets ``self._class`` to the majority class of ``self._y`` and
        ``self._belief`` to its relative frequency.
        """
        if not self.is_leaf():
            return
        classes, card = np.unique(self._y, return_counts=True)
        if len(classes) > 1:
            max_card = max(card)
            self._class = classes[card == max_card][0]
            self._belief = max_card / np.sum(card)
        else:
            self._belief = 1
            try:
                self._class = classes[0]
            except IndexError:
                # empty label set: no class can be assigned
                self._class = None

    def __str__(self) -> str:
        count_values = np.unique(self._y, return_counts=True)
        if self.is_leaf():
            return (
                f"{self._title} - Leaf class={self._class} belief="
                f"{self._belief: .6f} impurity={self._impurity:.4f} "
                f"counts={count_values}"
            )
        # NOTE(review): "feaures" typo kept intentionally — it is runtime
        # output and changing it would alter behavior/tests.
        return (
            f"{self._title} feaures={self._features} impurity="
            f"{self._impurity:.4f} "
            f"counts={count_values}"
        )
Compute", "X: np.ndarray, y: np.ndarray, features: np.array, impurity: float, title: str, weight: np.ndarray =", "\"fcbf\"]: raise ValueError( \"splitter must be in {random, best, mutual, cfs, fcbf} got", "where partition is going to be made train : bool Train time -", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the variabes with higher", "= Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node, train=True) y1,", "and its belief based on the subdataset of the node only if it", "array of samples labels : np.array labels of the dataset max_features : int", "dataset Parameters ---------- data : np.array distances to hyper plane of every class", "criteria not in [ \"max_samples\", \"impurity\", ]: raise ValueError( f\"criteria has to be", "multi class classification) which column (class) use to split the dataset in a", ": np.array labels of the dataset labels_up : np.array labels of one side", "np.ndarray) -> np.array: \"\"\"Compute distances of the samples to the hyperplane of the", "to hyper plane of every class y : np.array column of dataset to", "SVC: return self._clf def get_impurity(self) -> float: return self._impurity def get_features(self) -> np.array:", "Returns ------- tuple tuple with the dataset with only the features selected and", "np.array, labels: np.array, features_sets: list ) -> list: \"\"\"Return the best set of", "on hyperparameter Parameters ---------- dataset : np.array array of samples (# samples, #", "= None, ): self._clf = clf self._title = title self._belief = 0.0 #", "feature random combinations Parameters ---------- features : int number of features in each", "choose the max_features best features. 
“random”: The algorithm generates 5 candidates and choose", "the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit time just", "the indices of the features selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return", "count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class} belief=\"", "the criterion is the information gain Parameters ---------- dataset : np.array array of", "train time # is taking the classifier of class <col> col = node.get_partition_column()", "a subspace of the selected dataset of max_features length. Depending on hyperparameter Parameters", "0.0 # Compute standard entropy. for prop in proportions: if prop != 0.0:", "_get_subspaces_set( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Compute the", "indices def _impurity(self, data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset", "title : str label describing the route to the node weight : np.ndarray,", ": np.array array of samples labels : np.array labels of the dataset max_features", "Returns ------- tuple indices of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False)", "in dataset) Returns ------- tuple indices of the features selected \"\"\" # No", "set_title(self, title): self._title = title def set_classifier(self, clf): self._clf = clf def set_features(self,", "@classmethod def copy(cls, node: \"Snode\") -> \"Snode\": return cls( node._clf, node._X, node._y, node._features,", "raise ValueError( f\"criterion must be gini or entropy got({criterion})\" ) if criteria not", "return self._impurity def get_features(self) -> np.array: return self._features def set_up(self, son): self._up =", "of features randomly selected \"\"\" comb = set() # Generate at most 5", "def _select_best_set( self, dataset: np.array, labels: np.array, features_sets: list ) -> list: 
\"\"\"Return", "-> float: _, count = np.unique(y, return_counts=True) return 1 - np.sum(np.square(count / np.sum(count)))", "and the indices of the features selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features)", "a sklearn estimator ValueError criterion must be gini or entropy ValueError criteria has", "the class of the predictor and its belief based on the subdataset of", "---------- features : int number of features in each combination max_features : int", "if criterion not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion must be gini or", ": SVC Classifier used X : np.ndarray input dataset in train time (only", "* factorial(features - max_features) ) set_length = min(5, number) while len(comb) < set_length:", "with the label feature_list = mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)), key=lambda sub:", "in train time, by default None scaler : StandardScaler, optional scaler used if", "str, optional ecides (just in case of a multi class classification) which column", "labels Returns ------- float entropy \"\"\" n_labels = len(y) if n_labels <= 1:", "array of (m, nc) nc = # classes data = self._distances(node, samples) if", "def part(self, origin: np.array) -> list: \"\"\"Split an array in two based on", "are: “best”: sklearn SelectKBest algorithm is used in every node to choose the", "__iter__(self): # To complete the iterator interface return self def _push(self, node: Snode):", "Compute standard entropy. 
for prop in proportions: if prop != 0.0: entropy -=", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the variabes with", "random from math import log, factorial import numpy as np from sklearn.feature_selection import", "entropy ValueError criteria has to be max_samples or impurity ValueError splitter must be", "feature selection with max_features limit Parameters ---------- dataset : np.array array of samples", "to input dataset in train time, by default None scaler : StandardScaler, optional", "clf : SVC Classifier used X : np.ndarray input dataset in train time", "Raises ------ ValueError clf has to be a sklearn estimator ValueError criterion must", "in {random, best, mutual, cfs, fcbf} \"\"\" def __init__( self, clf: SVC =", "if data.ndim > 1: # split criteria for multiclass # Convert data to", "features_sets) @staticmethod def _fs_best( dataset: np.array, labels: np.array, max_features: int ) -> tuple:", "~self._up return [ origin[self._up] if any(self._up) else None, origin[down] if any(down) else None,", "of samples required to split an internal node. 0 (default) for any, by", "return result def _select_best_set( self, dataset: np.array, labels: np.array, features_sets: list ) ->", "split the dataset in a node. 
max_samples is incompatible with 'ovo' multiclass_strategy, by", "list ) -> list: \"\"\"Return the best set of features among feature_sets, the", "there aren't enough samples to split self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim", "tuple indices of the features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True)", "features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual(", "a sklearn estimator, got({clf})\") if criterion not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion", "np.unique(self._y, return_counts=True) if len(classes) > 1: max_card = max(card) self._class = classes[card ==", "imp_up - (card_dn / samples) * imp_dn ) return result def _select_best_set( self,", "dataset Returns ------- np.array column of dataset to be taken into account to", "the node only if it is a leaf \"\"\" if not self.is_leaf(): return", "def _fs_cfs( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based feature", "of the array \"\"\" down = ~self._up return [ origin[self._up] if any(self._up) else", "self._impurity def get_features(self) -> np.array: return self._features def set_up(self, son): self._up = son", "_impurity(self, data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset to be", "= y[data[:, col] > 0] tdn = y[data[:, col] <= 0] info_gain =", "features selected \"\"\" # Random feature reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features,", "0 selected = -1 for col in range(data.shape[1]): tup = y[data[:, col] >", "max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset: np.array, labels: np.array, max_features:", "to hyper plane of every class y : np.array vector of labels (classes)", ") if criteria not in [ \"max_samples\", \"impurity\", ]: raise ValueError( 
f\"criteria has", "of samples (# samples, # features) labels : np.array array of labels features_sets", "into account to split dataset \"\"\" # select the class with max number", "title: str, weight: np.ndarray = None, scaler: StandardScaler = None, ): self._clf =", "of classes \"\"\" X_transformed = data[:, node._features] if self._normalize: X_transformed = node._scaler.transform(X_transformed) return", "data : np.array distances to hyper plane of every class y : np.array", "be taken into account to split dataset \"\"\" max_gain = 0 selected =", "number of features in dataset Returns ------- list list with up to 5", "samples, # features) labels : np.array labels of the dataset max_features : int", "= clf self._title = title self._belief = 0.0 # Only store dataset in", "self._up def make_predictor(self): \"\"\"Compute the class of the predictor and its belief based", "self._min_samples_split: # there aren't enough samples to split self._up = np.ones((data.shape[0]), dtype=bool) return", "# is taking the classifier of class <col> col = node.get_partition_column() if col", "on SVM nodes Splitter class \"\"\" import os import warnings import random from", "one side labels_dn : np.array labels on the other side Returns ------- float", "= MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels: np.array,", "son): self._down = son def set_title(self, title): self._title = title def set_classifier(self, clf):", "used if max_features < num_features). 
Supported strategies are: “best”: sklearn SelectKBest algorithm is", "_push(self, node: Snode): if node is not None: self._stack.append(node) def __next__(self) -> Snode:", "f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return (", "to one side of the tree (up) Parameters ---------- samples : np.array array", "None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode(", "max_samples is incompatible with 'ovo' multiclass_strategy, by default None min_samples_split : int, optional", "return_counts=True) return np.argmax(samples) def partition(self, samples: np.array, node: Snode, train: bool): \"\"\"Set the", "data = np.ones(data.shape) data = data[:, col] self._up = data > 0 def", "nodes Splitter class \"\"\" import os import warnings import random from math import", "bool Train time - True / Test time - False \"\"\" # data", "discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y)", "from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC", "classes data = self._distances(node, samples) if data.shape[0] < self._min_samples_split: # there aren't enough", "int ) -> tuple: \"\"\"Return the best features with mutual information with labels", "( weight if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None ) self._features = features", "comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set( self, dataset: np.array, labels: np.array,", "the tree where partition is going to be made train : bool Train", "or impurity ValueError splitter must be in {random, best, mutual, cfs, fcbf} \"\"\"", "tuple( sorted( 
range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array,", "= np.bincount(y) proportions = counts / n_labels n_classes = np.count_nonzero(proportions) if n_classes <=", "which column (class) use to split the dataset in a node. max_samples is", "of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results()", ": np.array labels of one side labels_dn : np.array labels on the other", "log(prop, n_classes) return entropy def information_gain( self, labels: np.array, labels_up: np.array, labels_dn: np.array", "partition is going to be made train : bool Train time - True", "classification) which column (class) use to split the dataset in a node. max_samples", "impurity self._partition_column: int = -1 self._scaler = scaler @classmethod def copy(cls, node: \"Snode\")", "strategy used to choose the feature set at each node (only used if", "Snode): if node is not None: self._stack.append(node) def __next__(self) -> Snode: if len(self._stack)", "np.ndarray = None, scaler: StandardScaler = None, ): self._clf = clf self._title =", "category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf, dataset,", "samples (# samples, # features) labels : np.array array of labels features_sets :", "features used to compute hyperplane impurity : float impurity of the node title", "subdataset of the node only if it is a leaf \"\"\" if not", "0 def part(self, origin: np.array) -> list: \"\"\"Split an array in two based", "normalize : bool, optional If standardization of features should be applied on each", "int) -> list: \"\"\"Generate at most 5 feature random combinations Parameters ---------- features", "[] self._push(tree) def __iter__(self): # To complete the iterator interface return self def", "for samples if train: # in train time we have to compute the", "any, by 
default None random_state : optional Controls the pseudo random number generation", "get_subspace( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Re3turn a", "# Generate at most 5 combinations number = factorial(features) / ( factorial(max_features) *", "n_classes) return entropy def information_gain( self, labels: np.array, labels_up: np.array, labels_dn: np.array )", "self._up = son def is_leaf(self) -> bool: return self._up is None and self._down", "str = None, feature_select: str = None, criteria: str = None, min_samples_split: int", "= self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset: np.array, labels:", "np.array, features_sets: list ) -> list: \"\"\"Return the best set of features among", "in train time features : np.array features used to compute hyperplane impurity :", "weight: np.ndarray = None, scaler: StandardScaler = None, ): self._clf = clf self._title", "getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self,", "node is not None: self._stack.append(node) def __next__(self) -> Snode: if len(self._stack) == 0:", "of features should be applied on each node with the samples that reach", "if criteria not in [ \"max_samples\", \"impurity\", ]: raise ValueError( f\"criteria has to", "tuple: \"\"\"Compute the indices of the features selected by splitter depending on the", "the dataset max_features : int number of features of the subspace (< number", "> 0 def part(self, origin: np.array) -> list: \"\"\"Split an array in two", "------- np.array array of shape (m, nc) with the distances of every sample", "---------- labels : np.array labels of the dataset labels_up : np.array labels of", "choose the best (max. info. gain) of them. 
\"mutual\": Chooses the best features", ": Snode Node of the tree where partition is going to be made", "clf): self._clf = clf def set_features(self, features): self._features = features def set_impurity(self, impurity):", "account to split dataset \"\"\" max_gain = 0 selected = -1 for col", "dataset of max_features length. Depending on hyperparameter Parameters ---------- dataset : np.array array", "preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack = [] self._push(tree) def __iter__(self): #", "form the subspace Returns ------- tuple tuple with the dataset with only the", "impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree:", "imp_dn ) return result def _select_best_set( self, dataset: np.array, labels: np.array, features_sets: list", "to be taken into account to split dataset Returns ------- np.array column of", "gini or entropy got({criterion})\" ) if criteria not in [ \"max_samples\", \"impurity\", ]:", "self._up = None self._class = None self._feature = None self._sample_weight = ( weight", "def _gini(y: np.array) -> float: _, count = np.unique(y, return_counts=True) return 1 -", "every sample to every class hyperplane # array of (m, nc) nc =", "of them. \"mutual\": Chooses the best features w.r.t. 
their mutual info with the", "numpy as np from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from", "str label describing the route to the node weight : np.ndarray, optional weights", "!= \"NS\" else None self._y = y self._down = None self._up = None", "sample to every class hyperplane # array of (m, nc) nc = #", "str, optional The function to measure the quality of a split (only used", "int number of features in dataset Returns ------- list list with up to", "for multiclass # Convert data to a (m, 1) array selecting values for", "by default \"entropy\", by default None feature_select : str, optional The strategy used", "of the node title : str label describing the route to the node", "the feature set at each node (only used if max_features < num_features). Supported", "time we have to compute the column to take into # account to", "card_up + card_dn if samples == 0: return 0.0 else: result = (", "= None self._class = None self._feature = None self._sample_weight = ( weight if", "among feature_sets, the criterion is the information gain Parameters ---------- dataset : np.array", "Parameters ---------- clf : SVC Classifier used X : np.ndarray input dataset in", "__next__(self) -> Snode: if len(self._stack) == 0: raise StopIteration() node = self._stack.pop() self._push(node.get_up())", "+ card_dn if samples == 0: return 0.0 else: result = ( imp_prev", "in train time we have to compute the column to take into #", "array selecting values for samples if train: # in train time we have", "from mufs import MUFS class Snode: \"\"\" Nodes of the tree that keeps", "is not None else 0 imp_dn = self.criterion_function(labels_dn) samples = card_up + card_dn", "None scaler : StandardScaler, optional scaler used if any, by default None \"\"\"", "selected dataset of max_features length. 
Depending on hyperparameter Parameters ---------- dataset : np.array", "select features as selected in constructor return self.fs_function(dataset, labels, max_features) def get_subspace( self,", "SVC = None, criterion: str = None, feature_select: str = None, criteria: str", "labels Parameters ---------- dataset : np.array array of samples labels : np.array labels", "set of features among feature_sets, the criterion is the information gain Parameters ----------", "n_features = dataset.shape[1] if n_features == max_features: return tuple(range(n_features)) # select features as", "entropy got({criterion})\" ) if criteria not in [ \"max_samples\", \"impurity\", ]: raise ValueError(", "self._clf def get_impurity(self) -> float: return self._impurity def get_features(self) -> np.array: return self._features", "node.get_partition_column() if col == -1: # No partition is producing information gain data", "combinations number = factorial(features) / ( factorial(max_features) * factorial(features - max_features) ) set_length", "self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset: np.array, labels: np.array,", "return self._clf def get_impurity(self) -> float: return self._impurity def get_features(self) -> np.array: return", "node. 
max_samples is incompatible with 'ovo' multiclass_strategy, by default None min_samples_split : int,", "not in [ \"max_samples\", \"impurity\", ]: raise ValueError( f\"criteria has to be max_samples", "one side of the tree (up) Parameters ---------- samples : np.array array of", "= np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief:", "labels_dn is not None else 0 imp_dn = self.criterion_function(labels_dn) samples = card_up +", "dataset) Returns ------- tuple indices of the features selected \"\"\" mufs = MUFS(max_features=max_features,", "iterator\"\"\" def __init__(self, tree: Snode): self._stack = [] self._push(tree) def __iter__(self): # To", "the features selected and the indices of the features selected \"\"\" indices =", "hyperplane # array of (m, nc) nc = # classes data = self._distances(node,", "\"impurity\", ]: raise ValueError( f\"criteria has to be max_samples or impurity; got ({criteria})\"", "\"\"\" comb = set() # Generate at most 5 combinations number = factorial(features)", "SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array, labels: np.array, max_features:", "function to measure the quality of a split (only used if max_features !=", "= card_up + card_dn if samples == 0: return 0.0 else: result =", "and “entropy” for the information gain., by default \"entropy\", by default None feature_select", "(class) use to split the dataset in a node. 
max_samples is incompatible with", "def _impurity(self, data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset to", ") -> tuple: \"\"\"Return the variabes with higher f-score Parameters ---------- dataset :", "---------- samples : np.array array of samples (# samples, # features) node :", "np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f}", "The minimum number of samples required to split an internal node. 0 (default)", "to the hyperplane of the node Parameters ---------- node : Snode node containing", "SVC, X: np.ndarray, y: np.ndarray, features: np.array, impurity: float, title: str, weight: np.ndarray", "of shape (m, nc) with the distances of every sample to the hyperplane", "-> \"Snode\": return cls( node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, )", "classes[0] except IndexError: self._class = None def __str__(self) -> str: count_values = np.unique(self._y,", "data > 0 def part(self, origin: np.array) -> list: \"\"\"Split an array in", "origin[down] if any(down) else None, ] def _distances(self, node: Snode, data: np.ndarray) ->", "= classes[card == max_card][0] self._belief = max_card / np.sum(card) else: self._belief = 1", "Returns ------- list list with two splits of the array \"\"\" down =", "and its complement partition has to be called first to establish up indices", "\"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be in {random, best, mutual,", "entropy \"\"\" n_labels = len(y) if n_labels <= 1: return 0 counts =", "(m, 1) array selecting values for samples if train: # in train time", "best features w.r.t. their mutual info with the label. \"cfs\": Apply Correlation-based Feature", "self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset: np.array, labels: np.array, max_features: int )", "= 0.0 # Compute standard entropy. 
for prop in proportions: if prop !=", "= self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices def _impurity(self, data: np.array, y:", "the samples to the hyperplane of the node Parameters ---------- node : Snode", "to measure the quality of a split (only used if max_features != num_features).", "samples) * imp_up - (card_dn / samples) * imp_dn ) return result def", "not in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be in", "imp_dn = 0 if labels_up is not None: card_up = labels_up.shape[0] imp_up =", "split dataset \"\"\" max_gain = 0 selected = -1 for col in range(data.shape[1]):", ": int number of features of the subspace (<= number of features in", ": np.array array of samples (# samples, # features) labels : np.array array", "the dataset labels_up : np.array labels of one side labels_dn : np.array labels", "node: Snode, data: np.ndarray) -> np.array: \"\"\"Compute distances of the samples to the", "if labels_dn is not None: card_dn = labels_dn.shape[0] if labels_dn is not None", "= set() # Generate at most 5 combinations number = factorial(features) / (", "based on different criteria Parameters ---------- clf : SVC, optional classifier, by default", "\"\"\" # No feature reduction n_features = dataset.shape[1] if n_features == max_features: return", "# account to split the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: #", "should go to one side of the tree (up) Parameters ---------- samples :", "only the features selected and the indices of the features selected \"\"\" indices", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Re3turn a subspace of the", "return_counts=True) return 1 - np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y: np.array) -> float:", "= imp_dn = 0 if labels_up is not None: card_up = labels_up.shape[0] imp_up", "\"fcbf\": Apply Fast Correlation- Based, by default None criteria : 
str, optional ecides", "quality of a split (only used if max_features != num_features). Supported criteria are", "f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode):", "selecting values for samples if train: # in train time we have to", "containing the svm classifier data : np.ndarray samples to compute distance to hyperplane", "np.array: \"\"\"return column of dataset to be taken into account to split dataset", "\"entropy\", by default None feature_select : str, optional The strategy used to choose", "to form the subspace Returns ------- tuple tuple with the dataset with only", "of samples _, samples = np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples: np.array,", "\"\"\" down = ~self._up return [ origin[self._up] if any(self._up) else None, origin[down] if", "Correlation-based Feature Selection. \"fcbf\": Apply Fast Correlation- Based, by default None criteria :", "self._criteria = criteria self._feature_select = feature_select self._normalize = normalize if clf is None:", "class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack = [] self._push(tree)", "------- list best feature set \"\"\" max_gain = 0 selected = None warnings.filterwarnings(\"ignore\",", "the variabes with higher f-score Parameters ---------- dataset : np.array array of samples", "self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Compute the indices", "# select features as selected in constructor return self.fs_function(dataset, labels, max_features) def get_subspace(", "-> list: \"\"\"Split an array in two based on indices (self._up) and its", "max_features))) ) return list(comb) def _get_subspaces_set( self, dataset: np.array, labels: np.array, max_features: int", "\"entropy\"]: raise ValueError( f\"criterion must be gini or entropy got({criterion})\" ) if criteria", "the dataset max_features : int 
number of features to form the subspace Returns", "labels on the other side Returns ------- float information gain \"\"\" imp_prev =", "return 0.0 else: result = ( imp_prev - (card_up / samples) * imp_up", "\"\"\" Splits a dataset in two based on different criteria Parameters ---------- clf", "an int for reproducible output across multiple function calls, by default None normalize", "used in every node to choose the max_features best features. “random”: The algorithm", "gain \"\"\" imp_prev = self.criterion_function(labels) card_up = card_dn = imp_up = imp_dn =", "[\"gini\", \"entropy\"]: raise ValueError( f\"criterion must be gini or entropy got({criterion})\" ) if", "np.array labels of the dataset max_features : int number of features of the", "return dataset[:, indices], indices def _impurity(self, data: np.array, y: np.array) -> np.array: \"\"\"return", "the svm classifier and if testing the dataset assigned to it Parameters ----------", "np.array, max_features: int ) -> tuple: \"\"\"Return the best of five random feature", "int ) -> tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features limit Parameters ----------", "the features selected \"\"\" # No feature reduction n_features = dataset.shape[1] if n_features", "normalize if clf is None: raise ValueError(f\"clf has to be a sklearn estimator,", "\"\"\"Compute distances of the samples to the hyperplane of the node Parameters ----------", "\"\"\" if not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) if len(classes) >", "son def is_leaf(self) -> bool: return self._up is None and self._down is None", "None: self._stack.append(node) def __next__(self) -> Snode: if len(self._stack) == 0: raise StopIteration() node", "# return best features with mutual info with the label feature_list = mutual_info_classif(dataset,", "= info_gain return selected @staticmethod def _max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return", "return_counts=True) if len(classes) > 
1: max_card = max(card) self._class = classes[card == max_card][0]", "Supported criteria are “gini” for the Gini impurity and “entropy” for the information", "-> int: return self._partition_column def set_down(self, son): self._down = son def set_title(self, title):", "[ \"max_samples\", \"impurity\", ]: raise ValueError( f\"criteria has to be max_samples or impurity;", "gain data = np.ones(data.shape) data = data[:, col] self._up = data > 0", "belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f}", "split dataset Parameters ---------- data : np.array distances to hyper plane of every", "information gain data = np.ones(data.shape) data = data[:, col] self._up = data >", "= col def get_partition_column(self) -> int: return self._partition_column def set_down(self, son): self._down =", "mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels:", "np.array, max_features: int ) -> tuple: \"\"\"Re3turn a subspace of the selected dataset", "by default None criteria : str, optional ecides (just in case of a", "with only the features selected and the indices of the features selected \"\"\"", "self._scaler = scaler @classmethod def copy(cls, node: \"Snode\") -> \"Snode\": return cls( node._clf,", "clf self._random_state = random_state if random_state is not None: random.seed(random_state) self._criterion = criterion", "in dataset) Returns ------- tuple indices of the features selected \"\"\" return (", "not None: self._stack.append(node) def __next__(self) -> Snode: if len(self._stack) == 0: raise StopIteration()", "]: raise ValueError( f\"criteria has to be max_samples or impurity; got ({criteria})\" )", "The strategy used to choose the feature set at each node (only used", "prop * log(prop, n_classes) return entropy def information_gain( self, 
labels: np.array, labels_up: np.array,", "MUFS class Snode: \"\"\" Nodes of the tree that keeps the svm classifier", "feature set \"\"\" max_gain = 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set", "dataset in train time (only in testing) y : np.ndarray input labes in", "max_gain = gain selected = feature_set return selected if selected is not None", ": SVC, optional classifier, by default None criterion : str, optional The function", "to split the dataset in a node. max_samples is incompatible with 'ovo' multiclass_strategy,", "import MUFS class Snode: \"\"\" Nodes of the tree that keeps the svm", "to split the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit", "False.Pass an int for reproducible output across multiple function calls, by default None", "best features with mutual info with the label feature_list = mutual_info_classif(dataset, labels) return", "0] info_gain = self.information_gain(y, tup, tdn) if info_gain > max_gain: selected = col", "= counts / n_labels n_classes = np.count_nonzero(proportions) if n_classes <= 1: return 0", "into account to split dataset \"\"\" max_gain = 0 selected = -1 for", "combination of features randomly selected \"\"\" comb = set() # Generate at most", "# split criteria for multiclass # Convert data to a (m, 1) array", "features selected and the indices of the features selected \"\"\" indices = self._get_subspaces_set(dataset,", "def __init__(self, tree: Snode): self._stack = [] self._push(tree) def __iter__(self): # To complete", "data: np.ndarray) -> np.array: \"\"\"Compute distances of the samples to the hyperplane of", "not None else feature_set @staticmethod def _generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate", "feature reduction n_features = dataset.shape[1] if n_features == max_features: return tuple(range(n_features)) # select", ": np.array labels of the dataset max_features : int number 
of features to", "Oblique decision tree classifier based on SVM nodes Splitter class \"\"\" import os", "else: # in predcit time just use the column computed in train time", "max_card][0] self._belief = max_card / np.sum(card) else: self._belief = 1 try: self._class =", "of the node only if it is a leaf \"\"\" if not self.is_leaf():", "tree that keeps the svm classifier and if testing the dataset assigned to", "\"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array,", "self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset: np.array, labels: np.array, max_features: int )", ": np.array labels of the dataset max_features : int number of features of", "samples : np.array array of samples (# samples, # features) node : Snode", "number of features of the subspace (< number of features in dataset) Returns", "not None: card_dn = labels_dn.shape[0] if labels_dn is not None else 0 imp_dn", "labels_dn.shape[0] if labels_dn is not None else 0 imp_dn = self.criterion_function(labels_dn) samples =", "bool): \"\"\"Set the criteria to split arrays. Compute the indices of the samples", "_, count = np.unique(y, return_counts=True) return 1 - np.sum(np.square(count / np.sum(count))) @staticmethod def", "of the node Parameters ---------- node : Snode node containing the svm classifier", "compute the column to take into # account to split the dataset col", "return self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float: _, count = np.unique(y, return_counts=True)", "to split Returns ------- list list with two splits of the array \"\"\"", "node to choose the max_features best features. 
“random”: The algorithm generates 5 candidates", "_gini(y: np.array) -> float: _, count = np.unique(y, return_counts=True) return 1 - np.sum(np.square(count", "float: return self._impurity def get_features(self) -> np.array: return self._features def set_up(self, son): self._up", "is producing information gain data = np.ones(data.shape) data = data[:, col] self._up =", "by default False Raises ------ ValueError clf has to be a sklearn estimator", "feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self,", "number of samples _, samples = np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples:", "return selected @staticmethod def _max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return column of", "np.array labels of one side labels_dn : np.array labels on the other side", ": StandardScaler, optional scaler used if any, by default None \"\"\" def __init__(", "predictor and its belief based on the subdataset of the node only if", "self._push(node.get_down()) return node class Splitter: \"\"\" Splits a dataset in two based on", "node : Snode Node of the tree where partition is going to be", "impurity; got ({criteria})\" ) if feature_select not in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]:", "os import warnings import random from math import log, factorial import numpy as", "\"Snode\": return cls( node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def", "5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array)", "= -1 for col in range(data.shape[1]): tup = y[data[:, col] > 0] tdn", "the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def", "interface return self def 
_push(self, node: Snode): if node is not None: self._stack.append(node)", "scaler : StandardScaler, optional scaler used if any, by default None \"\"\" def", "self._impurity = impurity self._partition_column: int = -1 self._scaler = scaler @classmethod def copy(cls,", "labels features_sets : list list of features sets to check Returns ------- list", "the information gain., by default \"entropy\", by default None feature_select : str, optional", "and choose the best (max. info. gain) of them. \"mutual\": Chooses the best", "should be applied on each node with the samples that reach it ,", "predcit time just use the column computed in train time # is taking", "int = -1 self._scaler = scaler @classmethod def copy(cls, node: \"Snode\") -> \"Snode\":", "to compute distance to hyperplane Returns ------- np.array array of shape (m, nc)", "Correlation-based Filter algorithm with max_features limit Parameters ---------- dataset : np.array array of", "array of shape (m, nc) with the distances of every sample to the", "random_state if random_state is not None: random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split", "def get_title(self) -> str: return self._title def get_classifier(self) -> SVC: return self._clf def", "= self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit time just use the column", "random_state is not None: random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split self._criteria =", "n_labels <= 1: return 0 counts = np.bincount(y) proportions = counts / n_labels", "def set_up(self, son): self._up = son def is_leaf(self) -> bool: return self._up is", "> 1: max_card = max(card) self._class = classes[card == max_card][0] self._belief = max_card", "mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def", "np.array array of samples (# samples, # 
features) labels : np.array array of", "col = node.get_partition_column() if col == -1: # No partition is producing information", "origin : np.array dataset to split Returns ------- list list with two splits", "= np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples: np.array, node: Snode, train: bool):", "features_sets: list ) -> list: \"\"\"Return the best set of features among feature_sets,", "------- float information gain \"\"\" imp_prev = self.criterion_function(labels) card_up = card_dn = imp_up", "\" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class", "set() # Generate at most 5 combinations number = factorial(features) / ( factorial(max_features)", "self._clf, dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node, train=True) y1, y2 =", "labels: np.array, max_features: int ) -> tuple: \"\"\"Re3turn a subspace of the selected", "np.array) -> float: \"\"\"Compute entropy of a labels set Parameters ---------- y :", "np.argmax(samples) def partition(self, samples: np.array, node: Snode, train: bool): \"\"\"Set the criteria to", "if col == -1: # No partition is producing information gain data =", "combinations Parameters ---------- features : int number of features in each combination max_features", "while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set( self,", "f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple:", "any(down) else None, ] def _distances(self, node: Snode, data: np.ndarray) -> np.array: \"\"\"Compute", "y self._down = None self._up = None self._class = None self._feature = None", "to choose the feature set at each node (only used if max_features <", "np.array array of shape (m, nc) with the distances of every sample to", "tuple: 
\"\"\"Return the variabes with higher f-score Parameters ---------- dataset : np.array array", "Splits a dataset in two based on different criteria Parameters ---------- clf :", "default None criterion : str, optional The function to measure the quality of", "is taking the classifier of class <col> col = node.get_partition_column() if col ==", "features : np.array features used to compute hyperplane impurity : float impurity of", "must be gini or entropy ValueError criteria has to be max_samples or impurity", "return list(comb) def _get_subspaces_set( self, dataset: np.array, labels: np.array, max_features: int ) ->", "return np.argmax(samples) def partition(self, samples: np.array, node: Snode, train: bool): \"\"\"Set the criteria", "be made train : bool Train time - True / Test time -", "algorithm generates 5 candidates and choose the best (max. info. gain) of them.", "labels : np.array labels of the dataset max_features : int number of features", "self.criterion_function(labels_up) if labels_dn is not None: card_dn = labels_dn.shape[0] if labels_dn is not", "samples _, samples = np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples: np.array, node:", "tuple(range(n_features)) # select features as selected in constructor return self.fs_function(dataset, labels, max_features) def", ": int number of features in each combination max_features : int number of", "and self._down is None def get_down(self) -> \"Snode\": return self._down def get_up(self) ->", "class with max number of samples _, samples = np.unique(y, return_counts=True) return np.argmax(samples)", "every sample to the hyperplane of every class. 
nc = # of classes", "node: \"Snode\") -> \"Snode\": return cls( node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight,", "def _entropy(y: np.array) -> float: \"\"\"Compute entropy of a labels set Parameters ----------", "@staticmethod def _generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate at most 5 feature", "default False Raises ------ ValueError clf has to be a sklearn estimator ValueError", "= None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node =", "at most 5 feature random combinations Parameters ---------- features : int number of", "y: np.array) -> np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float: _,", "= np.count_nonzero(proportions) if n_classes <= 1: return 0 entropy = 0.0 # Compute", "based on SVM nodes Splitter class \"\"\" import os import warnings import random", "set_features(self, features): self._features = features def set_impurity(self, impurity): self._impurity = impurity def get_title(self)", "__str__(self) -> str: count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title} -", "list: \"\"\"Split an array in two based on indices (self._up) and its complement", "= criteria self._feature_select = feature_select self._normalize = normalize if clf is None: raise", "of dataset to be taken into account to split dataset Returns ------- np.array", "= imp_up = imp_dn = 0 if labels_up is not None: card_up =", "info_gain return selected @staticmethod def _max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return column", "depending on the self._feature_select hyper parameter Parameters ---------- dataset : np.array array of", "------- tuple indices of the features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels)", "= [] self._push(tree) def __iter__(self): # To complete the 
iterator interface return self", "establish up indices Parameters ---------- origin : np.array dataset to split Returns -------", "to hyperplane Returns ------- np.array array of shape (m, nc) with the distances", "time features : np.array features used to compute hyperplane impurity : float impurity", "float: \"\"\"Compute entropy of a labels set Parameters ---------- y : np.array set", "any, by default None \"\"\" def __init__( self, clf: SVC, X: np.ndarray, y:", "0 counts = np.bincount(y) proportions = counts / n_labels n_classes = np.count_nonzero(proportions) if", "The function to measure the quality of a split (only used if max_features", "-> SVC: return self._clf def get_impurity(self) -> float: return self._impurity def get_features(self) ->", "card_dn = imp_up = imp_dn = 0 if labels_up is not None: card_up", "down = ~self._up return [ origin[self._up] if any(self._up) else None, origin[down] if any(down)", "labels: np.array, max_features: int ) -> tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features", "return best features with mutual info with the label feature_list = mutual_info_classif(dataset, labels)", ": bool, optional If standardization of features should be applied on each node", "criteria Parameters ---------- clf : SVC, optional classifier, by default None criterion :", "it Parameters ---------- clf : SVC Classifier used X : np.ndarray input dataset", "has to be a sklearn estimator, got({clf})\") if criterion not in [\"gini\", \"entropy\"]:", "* imp_dn ) return result def _select_best_set( self, dataset: np.array, labels: np.array, features_sets:", "clf has to be a sklearn estimator ValueError criterion must be gini or", ": np.array array of samples (# samples, # features) labels : np.array labels", "be called first to establish up indices Parameters ---------- origin : np.array dataset", "class <col> col = node.get_partition_column() if col == -1: # No partition is", "the tree (up) Parameters ---------- samples : np.array array 
of samples (# samples,", "random_state : optional Controls the pseudo random number generation for shuffling the data", "self def _push(self, node: Snode): if node is not None: self._stack.append(node) def __next__(self)", "max_features != num_features). Supported criteria are “gini” for the Gini impurity and “entropy”", "0 imp_dn = self.criterion_function(labels_dn) samples = card_up + card_dn if samples == 0:", "1: return 0 entropy = 0.0 # Compute standard entropy. for prop in", "== max_features: return tuple(range(n_features)) # select features as selected in constructor return self.fs_function(dataset,", "has to be max_samples or impurity ValueError splitter must be in {random, best,", "= criterion self._min_samples_split = min_samples_split self._criteria = criteria self._feature_select = feature_select self._normalize =", "self._clf = clf self._random_state = random_state if random_state is not None: random.seed(random_state) self._criterion", "= 1 try: self._class = classes[0] except IndexError: self._class = None def __str__(self)", "impurity ValueError splitter must be in {random, best, mutual, cfs, fcbf} \"\"\" def", "------- np.array column of dataset to be taken into account to split dataset", "with mutual information with labels Parameters ---------- dataset : np.array array of samples", "- False \"\"\" # data contains the distances of every sample to every", "leaf \"\"\" if not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) if len(classes)", "the svm classifier data : np.ndarray samples to compute distance to hyperplane Returns", "factorial(max_features) * factorial(features - max_features) ) set_length = min(5, number) while len(comb) <", "Returns ------- list list with up to 5 combination of features randomly selected", "features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf(", "= None, min_samples_split: int = None, 
random_state=None, normalize=False, ): self._clf = clf self._random_state", "a dataset in two based on different criteria Parameters ---------- clf : SVC,", "into account to split dataset Parameters ---------- data : np.array distances to hyper", "with 'ovo' multiclass_strategy, by default None min_samples_split : int, optional The minimum number", "column (class) use to split the dataset in a node. max_samples is incompatible", "raise ValueError( \"splitter must be in {random, best, mutual, cfs, fcbf} got \"", "to every class hyperplane # array of (m, nc) nc = # classes", "1) array selecting values for samples if train: # in train time we", "must be in {random, best, mutual, cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function", "-> float: \"\"\"Compute entropy of a labels set Parameters ---------- y : np.array", "'ovo' multiclass_strategy, by default None min_samples_split : int, optional The minimum number of", "# there aren't enough samples to split self._up = np.ones((data.shape[0]), dtype=bool) return if", "is incompatible with 'ovo' multiclass_strategy, by default None min_samples_split : int, optional The", "for shuffling the data for probability estimates. 
Ignored when probability is False.Pass an", "if random_state is not None: random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split self._criteria", "str, weight: np.ndarray = None, scaler: StandardScaler = None, ): self._clf = clf", "else None, origin[down] if any(down) else None, ] def _distances(self, node: Snode, data:", "(card_up / samples) * imp_up - (card_dn / samples) * imp_dn ) return", "array of labels features_sets : list list of features sets to check Returns", "self._belief = 0.0 # Only store dataset in Testing self._X = X if", "the features selected \"\"\" # return best features with mutual info with the", "np.array ) -> float: \"\"\"Compute information gain of a split candidate Parameters ----------", "features as selected in constructor return self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset:", "optional ecides (just in case of a multi class classification) which column (class)", "the classifier of class <col> col = node.get_partition_column() if col == -1: #", "self._random_state = random_state if random_state is not None: random.seed(random_state) self._criterion = criterion self._min_samples_split", "in dataset) Returns ------- tuple indices of the features selected \"\"\" # return", "card = np.unique(self._y, return_counts=True) if len(classes) > 1: max_card = max(card) self._class =", "str: count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class}", "distances of every sample to every class hyperplane # array of (m, nc)", "split criteria for multiclass # Convert data to a (m, 1) array selecting", "np.array, y: np.array) -> np.array: \"\"\"return column of dataset to be taken into", "the indices of the features selected by splitter depending on the self._feature_select hyper", "# select the class with max number of samples _, samples = np.unique(y,", "Parameters ---------- y : np.array set of labels Returns 
------- float entropy \"\"\"", "imp_prev - (card_up / samples) * imp_up - (card_dn / samples) * imp_dn", "= labels_dn.shape[0] if labels_dn is not None else 0 imp_dn = self.criterion_function(labels_dn) samples", "array of samples (# samples, # features) labels : np.array array of labels", "testing the dataset assigned to it Parameters ---------- clf : SVC Classifier used", "@staticmethod def _fs_fcbf( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Fast", "partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float:", "get_partition_column(self) -> int: return self._partition_column def set_down(self, son): self._down = son def set_title(self,", "None else 0 imp_dn = self.criterion_function(labels_dn) samples = card_up + card_dn if samples", "be a sklearn estimator ValueError criterion must be gini or entropy ValueError criteria", "store dataset in Testing self._X = X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else", "labels_dn : np.array labels on the other side Returns ------- float information gain", "self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) if len(classes) > 1: max_card =", "------- list list with up to 5 combination of features randomly selected \"\"\"", ".6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\"", "\"Snode\": return self._up def make_predictor(self): \"\"\"Compute the class of the predictor and its", "entropy = 0.0 # Compute standard entropy. 
for prop in proportions: if prop", "= None, criterion: str = None, feature_select: str = None, criteria: str =", "optional The function to measure the quality of a split (only used if", "self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain = self.information_gain(labels, y1, y2) if", "np.array labels of the dataset max_features : int number of features to form", "self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\" Splits a dataset in two", "max_gain = 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:,", "def _fs_best( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the", "vector of labels (classes) Returns ------- np.array column of dataset to be taken", "str: return self._title def get_classifier(self) -> SVC: return self._clf def get_impurity(self) -> float:", "feature_sets, the criterion is the information gain Parameters ---------- dataset : np.array array", "for reproducible output across multiple function calls, by default None normalize : bool,", "sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning from mufs", "the features selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices", "self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array,", "np.ndarray, y: np.ndarray, features: np.array, impurity: float, title: str, weight: np.ndarray = None,", "Snode: if len(self._stack) == 0: raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return", "({criteria})\" ) if feature_select not in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError(", "Filter algorithm with 
max_features limit Parameters ---------- dataset : np.array array of samples", "a labels set Parameters ---------- y : np.array set of labels Returns -------", "int ) -> tuple: \"\"\"Compute the indices of the features selected by splitter", "estimator ValueError criterion must be gini or entropy ValueError criteria has to be", "going to be made train : bool Train time - True / Test", "get_title(self) -> str: return self._title def get_classifier(self) -> SVC: return self._clf def get_impurity(self)", "to the hyperplane of every class. nc = # of classes \"\"\" X_transformed", "card_up = card_dn = imp_up = imp_dn = 0 if labels_up is not", "not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) if len(classes) > 1: max_card", "!= 0.0: entropy -= prop * log(prop, n_classes) return entropy def information_gain( self,", "result = ( imp_prev - (card_up / samples) * imp_up - (card_dn /", "warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf,", "# classes data = self._distances(node, samples) if data.shape[0] < self._min_samples_split: # there aren't", "------- tuple indices of the features selected \"\"\" # return best features with", "os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y = y self._down = None self._up", "self._feature = None self._sample_weight = ( weight if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else", "---------- dataset : np.array array of samples labels : np.array labels of the", "y : np.ndarray input labes in train time features : np.array features used", "the features selected by splitter depending on the self._feature_select hyper parameter Parameters ----------", "to it Parameters ---------- clf : SVC Classifier used X : np.ndarray input", "on indices (self._up) and its complement partition has to be called first to", "the tree that keeps the svm classifier and if testing the dataset 
assigned", "the column to take into # account to split the dataset col =", "set_length = min(5, number) while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return", "info with the label feature_list = mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)), key=lambda", "Parameters ---------- clf : SVC, optional classifier, by default None criterion : str,", "set of labels Returns ------- float entropy \"\"\" n_labels = len(y) if n_labels", "node containing the svm classifier data : np.ndarray samples to compute distance to", "input labes in train time features : np.array features used to compute hyperplane", "dataset to be taken into account to split dataset \"\"\" # select the", "def partition(self, samples: np.array, node: Snode, train: bool): \"\"\"Set the criteria to split", "Returns ------- float information gain \"\"\" imp_prev = self.criterion_function(labels) card_up = card_dn =", "is not None: random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split self._criteria = criteria", "with labels Parameters ---------- dataset : np.array array of samples labels : np.array", "self._title = title def set_classifier(self, clf): self._clf = clf def set_features(self, features): self._features", "gain., by default \"entropy\", by default None feature_select : str, optional The strategy", "dataset.shape[1] if n_features == max_features: return tuple(range(n_features)) # select features as selected in", "Splitter class \"\"\" import os import warnings import random from math import log,", "------- tuple tuple with the dataset with only the features selected and the", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based feature selection with", "None and self._down is None def get_down(self) -> \"Snode\": return self._down def get_up(self)", "measure the quality of a split (only used if max_features != 
num_features). Supported", "be taken into account to split dataset Returns ------- np.array column of dataset", "max_features best features. “random”: The algorithm generates 5 candidates and choose the best", "features to form the subspace Returns ------- tuple tuple with the dataset with", "tup = y[data[:, col] > 0] tdn = y[data[:, col] <= 0] info_gain", "weight : np.ndarray, optional weights applied to input dataset in train time, by", "features in dataset Returns ------- list list with up to 5 combination of", "max_features) return dataset[:, indices], indices def _impurity(self, data: np.array, y: np.array) -> np.array:", "of the dataset max_features : int number of features of the subspace (<=", "of max_features length. Depending on hyperparameter Parameters ---------- dataset : np.array array of", "n_labels = len(y) if n_labels <= 1: return 0 counts = np.bincount(y) proportions", "return self def _push(self, node: Snode): if node is not None: self._stack.append(node) def", "np.array, max_features: int ) -> tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features limit", "incompatible with 'ovo' multiclass_strategy, by default None min_samples_split : int, optional The minimum", "else None, ] def _distances(self, node: Snode, data: np.ndarray) -> np.array: \"\"\"Compute distances", "scaler @classmethod def copy(cls, node: \"Snode\") -> \"Snode\": return cls( node._clf, node._X, node._y,", "labels, max_features) def get_subspace( self, dataset: np.array, labels: np.array, max_features: int ) ->", "if any, by default None \"\"\" def __init__( self, clf: SVC, X: np.ndarray,", "dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset:", "The algorithm generates 5 candidates and choose the best (max. info. 
gain) of", "dataset) Returns ------- tuple indices of the features selected \"\"\" # Random feature", "scaler used if any, by default None \"\"\" def __init__( self, clf: SVC,", "with the distances of every sample to the hyperplane of every class. nc", "feature_list = mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] )", ": int, optional The minimum number of samples required to split an internal", "time - False \"\"\" # data contains the distances of every sample to", "def __next__(self) -> Snode: if len(self._stack) == 0: raise StopIteration() node = self._stack.pop()", ") self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\")", "most 5 feature random combinations Parameters ---------- features : int number of features", "clf: SVC, X: np.ndarray, y: np.ndarray, features: np.array, impurity: float, title: str, weight:", "must be gini or entropy got({criterion})\" ) if criteria not in [ \"max_samples\",", "mutual, cfs, fcbf} \"\"\" def __init__( self, clf: SVC = None, criterion: str", "list with up to 5 combination of features randomly selected \"\"\" comb =", "> 1: # split criteria for multiclass # Convert data to a (m,", "origin[self._up] if any(self._up) else None, origin[down] if any(down) else None, ] def _distances(self,", "self.criterion_function(labels) card_up = card_dn = imp_up = imp_dn = 0 if labels_up is", "Testing self._X = X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y =", "None \"\"\" def __init__( self, clf: SVC, X: np.ndarray, y: np.ndarray, features: np.array,", "selected by splitter depending on the self._feature_select hyper parameter Parameters ---------- dataset :", "\"\"\"Split an array in two based on indices (self._up) and its complement partition", "samples) if data.shape[0] < 
self._min_samples_split: # there aren't enough samples to split self._up", "= None def __str__(self) -> str: count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): return", "hyperplane of every class. nc = # of classes \"\"\" X_transformed = data[:,", "feature_select: str = None, criteria: str = None, min_samples_split: int = None, random_state=None,", "train : bool Train time - True / Test time - False \"\"\"", "-> tuple: \"\"\"Correlattion-based feature selection with max_features limit Parameters ---------- dataset : np.array", "of a labels set Parameters ---------- y : np.array set of labels Returns", "No partition is producing information gain data = np.ones(data.shape) data = data[:, col]", "if feature_select not in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must", "= clf self._random_state = random_state if random_state is not None: random.seed(random_state) self._criterion =", "features selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices def", "if node is not None: self._stack.append(node) def __next__(self) -> Snode: if len(self._stack) ==", "a (m, 1) array selecting values for samples if train: # in train", "“best”: sklearn SelectKBest algorithm is used in every node to choose the max_features", "= data[:, col] self._up = data > 0 def part(self, origin: np.array) ->", "array in two based on indices (self._up) and its complement partition has to", "keeps the svm classifier and if testing the dataset assigned to it Parameters", "estimator, got({clf})\") if criterion not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion must be", "None def get_down(self) -> \"Snode\": return self._down def get_up(self) -> \"Snode\": return self._up", "information gain of a split candidate Parameters ---------- labels : np.array labels of", "= 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in 
features_sets: self._clf.fit(dataset[:, feature_set],", "= max(card) self._class = classes[card == max_card][0] self._belief = max_card / np.sum(card) else:", "impurity and “entropy” for the information gain., by default \"entropy\", by default None", "(< number of features in dataset) Returns ------- tuple indices of the features", "node only if it is a leaf \"\"\" if not self.is_leaf(): return classes,", "the features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def", "self._feature_select hyper parameter Parameters ---------- dataset : np.array array of samples labels :", "max_card = max(card) self._class = classes[card == max_card][0] self._belief = max_card / np.sum(card)", "the best set of features among feature_sets, the criterion is the information gain", "max_gain: selected = col max_gain = info_gain return selected @staticmethod def _max_samples(data: np.array,", "---------- clf : SVC Classifier used X : np.ndarray input dataset in train", ") -> list: \"\"\"Return the best set of features among feature_sets, the criterion", "with max_features limit Parameters ---------- dataset : np.array array of samples labels :", "bool: return self._up is None and self._down is None def get_down(self) -> \"Snode\":", "minimum number of samples required to split an internal node. 0 (default) for", "column computed in train time # is taking the classifier of class <col>", "< set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set( self, dataset: np.array,", "use to split the dataset in a node. max_samples is incompatible with 'ovo'", "np.array vector of labels (classes) Returns ------- np.array column of dataset to be", "train: bool): \"\"\"Set the criteria to split arrays. 
Compute the indices of the", "f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels: np.array, max_features:", "reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod", "np from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm import", "labels, max_features) return dataset[:, indices], indices def _impurity(self, data: np.array, y: np.array) ->", "is_leaf(self) -> bool: return self._up is None and self._down is None def get_down(self)", "(only in testing) y : np.ndarray input labes in train time features :", "split Returns ------- list list with two splits of the array \"\"\" down", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best of five", "to split an internal node. 0 (default) for any, by default None random_state", "selected \"\"\" # Random feature reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features)", "samples, # features) labels : np.array array of labels features_sets : list list", "ValueError criterion must be gini or entropy ValueError criteria has to be max_samples", "in proportions: if prop != 0.0: entropy -= prop * log(prop, n_classes) return", "in dataset Returns ------- list list with up to 5 combination of features", "np.array distances to hyper plane of every class y : np.array vector of", "account to split the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in", "SVC Classifier used X : np.ndarray input dataset in train time (only in", "samples that reach it , by default False Raises ------ ValueError clf has", "= ( weight if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None ) self._features =", "Parameters ---------- dataset : 
np.array array of samples (# samples, # features) labels", "def set_classifier(self, clf): self._clf = clf def set_features(self, features): self._features = features def", "used to choose the feature set at each node (only used if max_features", "self._normalize = normalize if clf is None: raise ValueError(f\"clf has to be a", "\"\"\"Compute the indices of the features selected by splitter depending on the self._feature_select", "else feature_set @staticmethod def _generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate at most", "max_card / np.sum(card) else: self._belief = 1 try: self._class = classes[0] except IndexError:", "Snode, train: bool): \"\"\"Set the criteria to split arrays. Compute the indices of", "= self.criterion_function(labels) card_up = card_dn = imp_up = imp_dn = 0 if labels_up", "@staticmethod def _fs_cfs( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based", "to split dataset Parameters ---------- data : np.array distances to hyper plane of", "= self.information_gain(labels, y1, y2) if gain > max_gain: max_gain = gain selected =", "0 if labels_up is not None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if", "f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack =", "the Gini impurity and “entropy” for the information gain., by default \"entropy\", by", "\"Snode\") -> \"Snode\": return cls( node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler,", "-> np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float: _, count =", "of features in dataset) Returns ------- tuple indices of the features selected \"\"\"", "got({clf})\") if criterion not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion must be gini", "\" f\"({feature_select})\" ) self.criterion_function = getattr(self, 
f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function =", "max(card) self._class = classes[card == max_card][0] self._belief = max_card / np.sum(card) else: self._belief", "nc) with the distances of every sample to the hyperplane of every class.", "by default None \"\"\" def __init__( self, clf: SVC, X: np.ndarray, y: np.ndarray,", "a multi class classification) which column (class) use to split the dataset in", "criteria to split arrays. Compute the indices of the samples that should go", "0.0 else: result = ( imp_prev - (card_up / samples) * imp_up -", "combination max_features : int number of features in dataset Returns ------- list list", "getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels: np.array, max_features: int ) ->", "0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels)", "up to 5 combination of features randomly selected \"\"\" comb = set() #", "Correlation- Based, by default None criteria : str, optional ecides (just in case", "np.array labels on the other side Returns ------- float information gain \"\"\" imp_prev", "self._class = classes[0] except IndexError: self._class = None def __str__(self) -> str: count_values", "sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array, labels: np.array, max_features: int", "of the features selected \"\"\" # return best features with mutual info with", "candidate Parameters ---------- labels : np.array labels of the dataset labels_up : np.array", ": np.ndarray input dataset in train time (only in testing) y : np.ndarray", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Compute the indices of", "node: Snode, train: bool): \"\"\"Set the criteria to split arrays. 
Compute the indices", "-> tuple: \"\"\"Return the best features with mutual information with labels Parameters ----------", "-> np.array: \"\"\"Compute distances of the samples to the hyperplane of the node", "self._clf = clf self._title = title self._belief = 0.0 # Only store dataset", "ecides (just in case of a multi class classification) which column (class) use", "To complete the iterator interface return self def _push(self, node: Snode): if node", "in testing) y : np.ndarray input labes in train time features : np.array", "\"\"\" import os import warnings import random from math import log, factorial import", "0: raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\"", "1 - np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute entropy", "feature set at each node (only used if max_features < num_features). Supported strategies", "ValueError criteria has to be max_samples or impurity ValueError splitter must be in", "in train time # is taking the classifier of class <col> col =", ": int number of features of the subspace (< number of features in", "train time we have to compute the column to take into # account", "np.array) -> np.array: \"\"\"return column of dataset to be taken into account to", "to be max_samples or impurity ValueError splitter must be in {random, best, mutual,", "a split (only used if max_features != num_features). Supported criteria are “gini” for", "default None \"\"\" def __init__( self, clf: SVC, X: np.ndarray, y: np.ndarray, features:", "if self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \"", "dataset \"\"\" # select the class with max number of samples _, samples", "the route to the node weight : np.ndarray, optional weights applied to input", "Apply Correlation-based Feature Selection. 
\"fcbf\": Apply Fast Correlation- Based, by default None criteria", "default None scaler : StandardScaler, optional scaler used if any, by default None", "internal node. 0 (default) for any, by default None random_state : optional Controls", "np.array, node: Snode, train: bool): \"\"\"Set the criteria to split arrays. Compute the", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best features", "[ origin[self._up] if any(self._up) else None, origin[down] if any(down) else None, ] def", "selected = col max_gain = info_gain return selected @staticmethod def _max_samples(data: np.array, y:", "indices Parameters ---------- origin : np.array dataset to split Returns ------- list list", "set_partition_column(self, col: int): self._partition_column = col def get_partition_column(self) -> int: return self._partition_column def", "- (card_dn / samples) * imp_dn ) return result def _select_best_set( self, dataset:", "used if max_features != num_features). Supported criteria are “gini” for the Gini impurity", "* imp_up - (card_dn / samples) * imp_dn ) return result def _select_best_set(", "in predcit time just use the column computed in train time # is", "int number of features to form the subspace Returns ------- tuple tuple with", "\"\"\"Correlattion-based feature selection with max_features limit Parameters ---------- dataset : np.array array of", "if len(classes) > 1: max_card = max(card) self._class = classes[card == max_card][0] self._belief", "str = None, criteria: str = None, min_samples_split: int = None, random_state=None, normalize=False,", "_distances(self, node: Snode, data: np.ndarray) -> np.array: \"\"\"Compute distances of the samples to", "import SVC from sklearn.exceptions import ConvergenceWarning from mufs import MUFS class Snode: \"\"\"", "feature reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets)", 
"\"NS\") != \"NS\" else None ) self._features = features self._impurity = impurity self._partition_column:", "information gain \"\"\" imp_prev = self.criterion_function(labels) card_up = card_dn = imp_up = imp_dn", "got({criterion})\" ) if criteria not in [ \"max_samples\", \"impurity\", ]: raise ValueError( f\"criteria", "= labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is not None: card_dn = labels_dn.shape[0]", "list best feature set \"\"\" max_gain = 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)", "= MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array:", "with the dataset with only the features selected and the indices of the", "self._partition_column def set_down(self, son): self._down = son def set_title(self, title): self._title = title", "best features with mutual information with labels Parameters ---------- dataset : np.array array", "of features in dataset Returns ------- list list with up to 5 combination", "---------- clf : SVC, optional classifier, by default None criterion : str, optional", "= feature_select self._normalize = normalize if clf is None: raise ValueError(f\"clf has to", "indices of the features selected \"\"\" # return best features with mutual info", "criterion : str, optional The function to measure the quality of a split", "col == -1: # No partition is producing information gain data = np.ones(data.shape)", "int, optional The minimum number of samples required to split an internal node.", "self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def", "node, train=True) y1, y2 = self.part(labels) gain = self.information_gain(labels, y1, y2) if gain", "= ( imp_prev - (card_up / samples) * imp_up - (card_dn / samples)", "# To complete 
the iterator interface return self def _push(self, node: Snode): if", "0 entropy = 0.0 # Compute standard entropy. for prop in proportions: if", "= y[data[:, col] <= 0] info_gain = self.information_gain(y, tup, tdn) if info_gain >", "\"\"\"Return the best of five random feature set combinations Parameters ---------- dataset :", "sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning from mufs import MUFS class Snode:", "labels of the dataset max_features : int number of features of the subspace", "of labels Returns ------- float entropy \"\"\" n_labels = len(y) if n_labels <=", "np.sum(card) else: self._belief = 1 try: self._class = classes[0] except IndexError: self._class =", "to the node weight : np.ndarray, optional weights applied to input dataset in", "(just in case of a multi class classification) which column (class) use to", "X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y = y self._down =", "set Parameters ---------- y : np.array set of labels Returns ------- float entropy", "_generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate at most 5 feature random combinations", "selection with max_features limit Parameters ---------- dataset : np.array array of samples labels", "mutual information with labels Parameters ---------- dataset : np.array array of samples labels", "of features sets to check Returns ------- list best feature set \"\"\" max_gain", "\"\"\" max_gain = 0 selected = -1 for col in range(data.shape[1]): tup =", "an array in two based on indices (self._up) and its complement partition has", "= classes[0] except IndexError: self._class = None def __str__(self) -> str: count_values =", "of labels features_sets : list list of features sets to check Returns -------", "have to compute the column to take into # account to split the", "= scaler @classmethod def copy(cls, node: \"Snode\") -> \"Snode\": return cls( node._clf, node._X,", "tup, tdn) if info_gain > max_gain: selected = col max_gain 
= info_gain return", "clf def set_features(self, features): self._features = features def set_impurity(self, impurity): self._impurity = impurity", "mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning", "samples required to split an internal node. 0 (default) for any, by default", "-1: # No partition is producing information gain data = np.ones(data.shape) data =", "train time (only in testing) y : np.ndarray input labes in train time", "labels_up: np.array, labels_dn: np.array ) -> float: \"\"\"Compute information gain of a split", "labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array, labels: np.array, max_features: int )", "applied to input dataset in train time, by default None scaler : StandardScaler,", "Snode): self._stack = [] self._push(tree) def __iter__(self): # To complete the iterator interface", "in features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf, dataset, labels, feature_set, 0.0,", "at most 5 combinations number = factorial(features) / ( factorial(max_features) * factorial(features -", "labels: np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based feature selection with max_features limit", "sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from", "is not None: card_dn = labels_dn.shape[0] if labels_dn is not None else 0", "import random from math import log, factorial import numpy as np from sklearn.feature_selection", "dataset) Returns ------- tuple indices of the features selected \"\"\" # No feature", "Parameters ---------- data : np.array distances to hyper plane of every class y", "indices of the features selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:,", "by default None scaler : StandardScaler, optional scaler used if any, by 
default", "criterion: str = None, feature_select: str = None, criteria: str = None, min_samples_split:", "None ) self._features = features self._impurity = impurity self._partition_column: int = -1 self._scaler", "= features self._impurity = impurity self._partition_column: int = -1 self._scaler = scaler @classmethod", "len(self._stack) == 0: raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class", "= np.ones(data.shape) data = data[:, col] self._up = data > 0 def part(self,", "self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best", "1 try: self._class = classes[0] except IndexError: self._class = None def __str__(self) ->", "the node weight : np.ndarray, optional weights applied to input dataset in train", "be max_samples or impurity; got ({criteria})\" ) if feature_select not in [\"random\", \"best\",", "None: raise ValueError(f\"clf has to be a sklearn estimator, got({clf})\") if criterion not", "np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples: np.array, node: Snode, train: bool): \"\"\"Set", "samples) * imp_dn ) return result def _select_best_set( self, dataset: np.array, labels: np.array,", "int = None, random_state=None, normalize=False, ): self._clf = clf self._random_state = random_state if", "return 0 counts = np.bincount(y) proportions = counts / n_labels n_classes = np.count_nonzero(proportions)", "None, scaler: StandardScaler = None, ): self._clf = clf self._title = title self._belief", "def _push(self, node: Snode): if node is not None: self._stack.append(node) def __next__(self) ->", "applied on each node with the samples that reach it , by default", "two based on different criteria Parameters ---------- clf : SVC, optional classifier, by", "feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array, labels: np.array, max_features: int )", "dataset) Returns ------- tuple indices of the 
features selected \"\"\" return ( SelectKBest(k=max_features)", "samples if train: # in train time we have to compute the column", "-> Snode: if len(self._stack) == 0: raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down())", "Only store dataset in Testing self._X = X if os.environ.get(\"TESTING\", \"NS\") != \"NS\"", "the best features w.r.t. their mutual info with the label. \"cfs\": Apply Correlation-based", "called first to establish up indices Parameters ---------- origin : np.array dataset to", "indices of the features selected by splitter depending on the self._feature_select hyper parameter", "------- tuple indices of the features selected \"\"\" # Random feature reduction n_features", "np.ndarray input dataset in train time (only in testing) y : np.ndarray input", "(up) Parameters ---------- samples : np.array array of samples (# samples, # features)", "Parameters ---------- samples : np.array array of samples (# samples, # features) node", "\"\"\" Oblique decision tree classifier based on SVM nodes Splitter class \"\"\" import", "if labels_up is not None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn", "of every class y : np.array vector of labels (classes) Returns ------- np.array", "np.array features used to compute hyperplane impurity : float impurity of the node", "complement partition has to be called first to establish up indices Parameters ----------", "features in each combination max_features : int number of features in dataset Returns", "col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit time just use the", "made train : bool Train time - True / Test time - False", "= impurity def get_title(self) -> str: return self._title def get_classifier(self) -> SVC: return", "labels of the dataset max_features : int number of features to form the", "None self._feature = None self._sample_weight = ( weight if 
os.environ.get(\"TESTING\", \"NS\") != \"NS\"", "set_impurity(self, impurity): self._impurity = impurity def get_title(self) -> str: return self._title def get_classifier(self)", "to choose the max_features best features. “random”: The algorithm generates 5 candidates and", "node._impurity, node._title, node._sample_weight, node._scaler, ) def set_partition_column(self, col: int): self._partition_column = col def", "\"\"\"Return the best features with mutual information with labels Parameters ---------- dataset :", "selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset:", "cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self,", "reproducible output across multiple function calls, by default None normalize : bool, optional", "with mutual info with the label feature_list = mutual_info_classif(dataset, labels) return tuple( sorted(", "data = self._distances(node, samples) if data.shape[0] < self._min_samples_split: # there aren't enough samples", "self._down = son def set_title(self, title): self._title = title def set_classifier(self, clf): self._clf", "return 0 entropy = 0.0 # Compute standard entropy. 
for prop in proportions:", "with two splits of the array \"\"\" down = ~self._up return [ origin[self._up]", "features selected \"\"\" # No feature reduction n_features = dataset.shape[1] if n_features ==", "set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set( self, dataset: np.array, labels:", "return self._up def make_predictor(self): \"\"\"Compute the class of the predictor and its belief", "-> tuple: \"\"\"Compute the indices of the features selected by splitter depending on", "list with two splits of the array \"\"\" down = ~self._up return [", "np.array) -> float: _, count = np.unique(y, return_counts=True) return 1 - np.sum(np.square(count /", "get_up(self) -> \"Snode\": return self._up def make_predictor(self): \"\"\"Compute the class of the predictor", "selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node", "np.array, impurity: float, title: str, weight: np.ndarray = None, scaler: StandardScaler = None,", "node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def set_partition_column(self, col: int): self._partition_column = col", "dataset in train time, by default None scaler : StandardScaler, optional scaler used", "that reach it , by default False Raises ------ ValueError clf has to", "max_features : int number of features in dataset Returns ------- list list with", "card_dn if samples == 0: return 0.0 else: result = ( imp_prev -", "== 0: raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter:", "n_classes = np.count_nonzero(proportions) if n_classes <= 1: return 0 entropy = 0.0 #", "features : int number of features in each combination max_features : int number", "split arrays. 
Compute the indices of the samples that should go to one", "y : np.array vector of labels (classes) Returns ------- np.array column of dataset", "class classification) which column (class) use to split the dataset in a node.", "\"max_samples\", \"impurity\", ]: raise ValueError( f\"criteria has to be max_samples or impurity; got", "that keeps the svm classifier and if testing the dataset assigned to it", "5 combinations number = factorial(features) / ( factorial(max_features) * factorial(features - max_features) )", "else: result = ( imp_prev - (card_up / samples) * imp_up - (card_dn", "list list of features sets to check Returns ------- list best feature set", "number of features to form the subspace Returns ------- tuple tuple with the", "labels_dn: np.array ) -> float: \"\"\"Compute information gain of a split candidate Parameters", "y1, y2) if gain > max_gain: max_gain = gain selected = feature_set return", "be in {random, best, mutual, cfs, fcbf} \"\"\" def __init__( self, clf: SVC", "= normalize if clf is None: raise ValueError(f\"clf has to be a sklearn", "self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices def _impurity(self, data: np.array, y: np.array)", "split an internal node. 0 (default) for any, by default None random_state :", "producing information gain data = np.ones(data.shape) data = data[:, col] self._up = data", "ValueError( f\"criterion must be gini or entropy got({criterion})\" ) if criteria not in", "prop in proportions: if prop != 0.0: entropy -= prop * log(prop, n_classes)", "None: random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split self._criteria = criteria self._feature_select =", "tuple indices of the features selected \"\"\" # Random feature reduction n_features =", "import os import warnings import random from math import log, factorial import numpy", "candidates and choose the best (max. info. gain) of them. 
\"mutual\": Chooses the", "0: return 0.0 else: result = ( imp_prev - (card_up / samples) *", "= son def is_leaf(self) -> bool: return self._up is None and self._down is", "(self._up) and its complement partition has to be called first to establish up", "return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod", ": float impurity of the node title : str label describing the route", "counts = np.bincount(y) proportions = counts / n_labels n_classes = np.count_nonzero(proportions) if n_classes", "# features) labels : np.array array of labels features_sets : list list of", "arrays. Compute the indices of the samples that should go to one side", "hyperparameter Parameters ---------- dataset : np.array array of samples (# samples, # features)", "-1 self._scaler = scaler @classmethod def copy(cls, node: \"Snode\") -> \"Snode\": return cls(", "in a node. max_samples is incompatible with 'ovo' multiclass_strategy, by default None min_samples_split", "selected = feature_set return selected if selected is not None else feature_set @staticmethod", "features) node : Snode Node of the tree where partition is going to", "to be called first to establish up indices Parameters ---------- origin : np.array", "f\"criteria has to be max_samples or impurity; got ({criteria})\" ) if feature_select not", "multiple function calls, by default None normalize : bool, optional If standardization of", "samples to the hyperplane of the node Parameters ---------- node : Snode node", "of samples (# samples, # features) labels : np.array labels of the dataset", "warnings import random from math import log, factorial import numpy as np from", "indices of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels,", "return self._features def set_up(self, son): self._up = son def is_leaf(self) -> bool: return", "criterion self._min_samples_split 
= min_samples_split self._criteria = criteria self._feature_select = feature_select self._normalize = normalize", "shape (m, nc) with the distances of every sample to the hyperplane of", "with the label. \"cfs\": Apply Correlation-based Feature Selection. \"fcbf\": Apply Fast Correlation- Based,", "scaler: StandardScaler = None, ): self._clf = clf self._title = title self._belief =", "every class. nc = # of classes \"\"\" X_transformed = data[:, node._features] if", "None criterion : str, optional The function to measure the quality of a", "num_features). Supported strategies are: “best”: sklearn SelectKBest algorithm is used in every node", "(classes) Returns ------- np.array column of dataset to be taken into account to", "def set_impurity(self, impurity): self._impurity = impurity def get_title(self) -> str: return self._title def", "of every sample to every class hyperplane # array of (m, nc) nc", "def __str__(self) -> str: count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title}", "of the selected dataset of max_features length. 
Depending on hyperparameter Parameters ---------- dataset", "-> str: count_values = np.unique(self._y, return_counts=True) if self.is_leaf(): return ( f\"{self._title} - Leaf", "labels of one side labels_dn : np.array labels on the other side Returns", "based on indices (self._up) and its complement partition has to be called first", "None feature_select : str, optional The strategy used to choose the feature set", "hyper plane of every class y : np.array vector of labels (classes) Returns", "__init__(self, tree: Snode): self._stack = [] self._push(tree) def __iter__(self): # To complete the", "= self.part(labels) gain = self.information_gain(labels, y1, y2) if gain > max_gain: max_gain =", "features_sets : list list of features sets to check Returns ------- list best", "\"splitter must be in {random, best, mutual, cfs, fcbf} got \" f\"({feature_select})\" )", "True / Test time - False \"\"\" # data contains the distances of", "the distances of every sample to the hyperplane of every class. nc =", "number of samples required to split an internal node. 
0 (default) for any,", ") -> tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features limit Parameters ---------- dataset", "iterator interface return self def _push(self, node: Snode): if node is not None:", "hyperplane of the node Parameters ---------- node : Snode node containing the svm", "<gh_stars>1-10 \"\"\" Oblique decision tree classifier based on SVM nodes Splitter class \"\"\"", ")[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array, labels: np.array, max_features: int ) ->", "getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels: np.array,", "= dataset.shape[1] if n_features == max_features: return tuple(range(n_features)) # select features as selected", "if testing the dataset assigned to it Parameters ---------- clf : SVC Classifier", "= min_samples_split self._criteria = criteria self._feature_select = feature_select self._normalize = normalize if clf", "): self._clf = clf self._random_state = random_state if random_state is not None: random.seed(random_state)", "of a multi class classification) which column (class) use to split the dataset", "of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod", "Node of the tree where partition is going to be made train :", "= node.get_partition_column() if col == -1: # No partition is producing information gain", "choose the feature set at each node (only used if max_features < num_features).", "= feature_set return selected if selected is not None else feature_set @staticmethod def", "of the samples that should go to one side of the tree (up)", "classes, card = np.unique(self._y, return_counts=True) if len(classes) > 1: max_card = max(card) self._class", "_entropy(y: np.array) -> float: \"\"\"Compute entropy of a labels set Parameters ---------- y", "of the dataset max_features : int number of 
features to form the subspace", "to take into # account to split the dataset col = self.decision_criteria(data, node._y)", ": Snode node containing the svm classifier data : np.ndarray samples to compute", "max_features: int) -> list: \"\"\"Generate at most 5 feature random combinations Parameters ----------", "int number of features of the subspace (< number of features in dataset)", "StandardScaler, optional scaler used if any, by default None \"\"\" def __init__( self,", "int, max_features: int) -> list: \"\"\"Generate at most 5 feature random combinations Parameters", "to split arrays. Compute the indices of the samples that should go to", "input dataset in train time (only in testing) y : np.ndarray input labes", "return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels: np.array, max_features: int )", "-1 for col in range(data.shape[1]): tup = y[data[:, col] > 0] tdn =", "np.array, max_features: int ) -> tuple: \"\"\"Return the best features with mutual information", "nc = # classes data = self._distances(node, samples) if data.shape[0] < self._min_samples_split: #", "is None: raise ValueError(f\"clf has to be a sklearn estimator, got({clf})\") if criterion", "dataset: np.array, labels: np.array, features_sets: list ) -> list: \"\"\"Return the best set", "return 1 - np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute", ": np.array features used to compute hyperplane impurity : float impurity of the", "(# samples, # features) node : Snode Node of the tree where partition", "selected \"\"\" # return best features with mutual info with the label feature_list", "class y : np.array column of dataset to be taken into account to", "= title self._belief = 0.0 # Only store dataset in Testing self._X =", "np.array, max_features: int ) -> tuple: \"\"\"Compute the indices of the features selected", "n_features = dataset.shape[1] features_sets = 
self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def", "Depending on hyperparameter Parameters ---------- dataset : np.array array of samples (# samples,", "of samples labels : np.array labels of the dataset max_features : int number", "np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute entropy of a", "None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is not None: card_dn", "range(data.shape[1]): tup = y[data[:, col] > 0] tdn = y[data[:, col] <= 0]", "the node Parameters ---------- node : Snode node containing the svm classifier data", "mutual, cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria =", "probability estimates. Ignored when probability is False.Pass an int for reproducible output across", "= 0 if labels_up is not None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up)", "( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array, labels: np.array,", "number of features in dataset) Returns ------- tuple indices of the features selected", "<= 1: return 0 entropy = 0.0 # Compute standard entropy. for prop", "from sklearn.exceptions import ConvergenceWarning from mufs import MUFS class Snode: \"\"\" Nodes of", "distances of the samples to the hyperplane of the node Parameters ---------- node", "5 combination of features randomly selected \"\"\" comb = set() # Generate at", "distance to hyperplane Returns ------- np.array array of shape (m, nc) with the", "features) labels : np.array labels of the dataset max_features : int number of", "estimates. 
Ignored when probability is False.Pass an int for reproducible output across multiple", "variabes with higher f-score Parameters ---------- dataset : np.array array of samples labels", "int: return self._partition_column def set_down(self, son): self._down = son def set_title(self, title): self._title", "label describing the route to the node weight : np.ndarray, optional weights applied", "labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best features with mutual", "else None ) self._features = features self._impurity = impurity self._partition_column: int = -1", "str = None, min_samples_split: int = None, random_state=None, normalize=False, ): self._clf = clf", "np.array) -> list: \"\"\"Split an array in two based on indices (self._up) and", "else 0 imp_dn = self.criterion_function(labels_dn) samples = card_up + card_dn if samples ==", "compute hyperplane impurity : float impurity of the node title : str label", "node with the samples that reach it , by default False Raises ------", "default None min_samples_split : int, optional The minimum number of samples required to", "into account to split dataset Returns ------- np.array column of dataset to be", "self.criterion_function(labels_dn) samples = card_up + card_dn if samples == 0: return 0.0 else:", "self._impurity = impurity def get_title(self) -> str: return self._title def get_classifier(self) -> SVC:", "list list with up to 5 combination of features randomly selected \"\"\" comb", "= None, criteria: str = None, min_samples_split: int = None, random_state=None, normalize=False, ):", "= 0 selected = -1 for col in range(data.shape[1]): tup = y[data[:, col]", "def set_partition_column(self, col: int): self._partition_column = col def get_partition_column(self) -> int: return self._partition_column", "float: _, count = np.unique(y, return_counts=True) return 1 - np.sum(np.square(count / np.sum(count))) @staticmethod", "of a split candidate Parameters ---------- labels : np.array labels of the 
dataset", "except IndexError: self._class = None def __str__(self) -> str: count_values = np.unique(self._y, return_counts=True)", "cls( node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def set_partition_column(self, col:", "comb = set() # Generate at most 5 combinations number = factorial(features) /", "taking the classifier of class <col> col = node.get_partition_column() if col == -1:", ") return list(comb) def _get_subspaces_set( self, dataset: np.array, labels: np.array, max_features: int )", "split dataset \"\"\" # select the class with max number of samples _,", "False Raises ------ ValueError clf has to be a sklearn estimator ValueError criterion", "Chooses the best features w.r.t. their mutual info with the label. \"cfs\": Apply", "node weight : np.ndarray, optional weights applied to input dataset in train time,", "np.array array of samples (# samples, # features) node : Snode Node of", "np.bincount(y) proportions = counts / n_labels n_classes = np.count_nonzero(proportions) if n_classes <= 1:", "sklearn.exceptions import ConvergenceWarning from mufs import MUFS class Snode: \"\"\" Nodes of the", "ValueError clf has to be a sklearn estimator ValueError criterion must be gini", "n_labels n_classes = np.count_nonzero(proportions) if n_classes <= 1: return 0 entropy = 0.0", "-> tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features limit Parameters ---------- dataset :", "labels set Parameters ---------- y : np.array set of labels Returns ------- float", "_fs_cfs( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based feature selection", "taken into account to split dataset \"\"\" max_gain = 0 selected = -1", "\"\"\" n_labels = len(y) if n_labels <= 1: return 0 counts = np.bincount(y)", "# Only store dataset in Testing self._X = X if os.environ.get(\"TESTING\", \"NS\") !=", "f-score Parameters ---------- dataset : np.array array of samples 
labels : np.array labels", "self._stack = [] self._push(tree) def __iter__(self): # To complete the iterator interface return", "the samples that should go to one side of the tree (up) Parameters", "== 0: return 0.0 else: result = ( imp_prev - (card_up / samples)", "-> bool: return self._up is None and self._down is None def get_down(self) ->", "np.sum(count))) @staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute entropy of a labels set", "f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \"", "by default None criterion : str, optional The function to measure the quality", "Snode, data: np.ndarray) -> np.array: \"\"\"Compute distances of the samples to the hyperplane", "\"\"\" # data contains the distances of every sample to every class hyperplane", "the indices of the samples that should go to one side of the", "> max_gain: selected = col max_gain = info_gain return selected @staticmethod def _max_samples(data:", "col: int): self._partition_column = col def get_partition_column(self) -> int: return self._partition_column def set_down(self,", "indices], indices def _impurity(self, data: np.array, y: np.array) -> np.array: \"\"\"return column of", "criterion is the information gain Parameters ---------- dataset : np.array array of samples", "-> tuple: \"\"\"Return the best of five random feature set combinations Parameters ----------", "self._min_samples_split = min_samples_split self._criteria = criteria self._feature_select = feature_select self._normalize = normalize if", "def _generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate at most 5 feature random", "number of features of the subspace (<= number of features in dataset) Returns", "\"\"\" def __init__( self, clf: SVC, X: np.ndarray, y: np.ndarray, features: np.array, impurity:", "---------- origin : np.array dataset to split Returns ------- list list with 
two", "dataset assigned to it Parameters ---------- clf : SVC Classifier used X :", "self._class = classes[card == max_card][0] self._belief = max_card / np.sum(card) else: self._belief =", "= -1 self._scaler = scaler @classmethod def copy(cls, node: \"Snode\") -> \"Snode\": return", "return entropy def information_gain( self, labels: np.array, labels_up: np.array, labels_dn: np.array ) ->", "- (card_up / samples) * imp_up - (card_dn / samples) * imp_dn )", "def _max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset to be", "selected and the indices of the features selected \"\"\" indices = self._get_subspaces_set(dataset, labels,", "go to one side of the tree (up) Parameters ---------- samples : np.array", "-> tuple: \"\"\"Return the variabes with higher f-score Parameters ---------- dataset : np.array", "= impurity self._partition_column: int = -1 self._scaler = scaler @classmethod def copy(cls, node:", "(m, nc) nc = # classes data = self._distances(node, samples) if data.shape[0] <", "\"\"\"Compute entropy of a labels set Parameters ---------- y : np.array set of", "= clf def set_features(self, features): self._features = features def set_impurity(self, impurity): self._impurity =", "input dataset in train time, by default None scaler : StandardScaler, optional scaler", "\"Snode\": return self._down def get_up(self) -> \"Snode\": return self._up def make_predictor(self): \"\"\"Compute the", "info_gain = self.information_gain(y, tup, tdn) if info_gain > max_gain: selected = col max_gain", "ValueError splitter must be in {random, best, mutual, cfs, fcbf} \"\"\" def __init__(", "SVC, optional classifier, by default None criterion : str, optional The function to", "class. 
nc = # of classes \"\"\" X_transformed = data[:, node._features] if self._normalize:", "col max_gain = info_gain return selected @staticmethod def _max_samples(data: np.array, y: np.array) ->", "is not None: self._stack.append(node) def __next__(self) -> Snode: if len(self._stack) == 0: raise", "mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels: np.array, max_features: int ) ->", "as selected in constructor return self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset: np.array,", "svm classifier data : np.ndarray samples to compute distance to hyperplane Returns -------", "/ ( factorial(max_features) * factorial(features - max_features) ) set_length = min(5, number) while", "np.ndarray, features: np.array, impurity: float, title: str, weight: np.ndarray = None, scaler: StandardScaler", "has to be called first to establish up indices Parameters ---------- origin :", ") -> tuple: \"\"\"Return the best features with mutual information with labels Parameters", "list of features sets to check Returns ------- list best feature set \"\"\"", "plane of every class y : np.array column of dataset to be taken", "class y : np.array vector of labels (classes) Returns ------- np.array column of", "if train: # in train time we have to compute the column to", "every class hyperplane # array of (m, nc) nc = # classes data", "self._features def set_up(self, son): self._up = son def is_leaf(self) -> bool: return self._up", "weights applied to input dataset in train time, by default None scaler :", "None self._up = None self._class = None self._feature = None self._sample_weight = (", "self._clf = clf def set_features(self, features): self._features = features def set_impurity(self, impurity): self._impurity", "as np from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm", "data to a (m, 1) array selecting values for samples if train: 
#", ") def set_partition_column(self, col: int): self._partition_column = col def get_partition_column(self) -> int: return", "five random feature set combinations Parameters ---------- dataset : np.array array of samples", "@staticmethod def _fs_mutual( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return", "SVC from sklearn.exceptions import ConvergenceWarning from mufs import MUFS class Snode: \"\"\" Nodes", "max_features) ) set_length = min(5, number) while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features)))", "max_features limit Parameters ---------- dataset : np.array array of samples labels : np.array", "tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features limit Parameters ---------- dataset : np.array", "float information gain \"\"\" imp_prev = self.criterion_function(labels) card_up = card_dn = imp_up =", "np.array: \"\"\"Compute distances of the samples to the hyperplane of the node Parameters", "algorithm is used in every node to choose the max_features best features. 
“random”:", "strategies are: “best”: sklearn SelectKBest algorithm is used in every node to choose", "be taken into account to split dataset Parameters ---------- data : np.array distances", ": int number of features to form the subspace Returns ------- tuple tuple", "data = data[:, col] self._up = data > 0 def part(self, origin: np.array)", "impurity : float impurity of the node title : str label describing the", "son def set_title(self, title): self._title = title def set_classifier(self, clf): self._clf = clf", "from math import log, factorial import numpy as np from sklearn.feature_selection import SelectKBest,", "Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features}", "np.array column of dataset to be taken into account to split dataset \"\"\"", "features sets to check Returns ------- list best feature set \"\"\" max_gain =", "distances of every sample to the hyperplane of every class. nc = #", "y: np.ndarray, features: np.array, impurity: float, title: str, weight: np.ndarray = None, scaler:", "data[:, col] self._up = data > 0 def part(self, origin: np.array) -> list:", "two splits of the array \"\"\" down = ~self._up return [ origin[self._up] if", "No feature reduction n_features = dataset.shape[1] if n_features == max_features: return tuple(range(n_features)) #", "computed in train time # is taking the classifier of class <col> col", "their mutual info with the label. \"cfs\": Apply Correlation-based Feature Selection. \"fcbf\": Apply", "the subspace Returns ------- tuple tuple with the dataset with only the features", "Selection. 
\"fcbf\": Apply Fast Correlation- Based, by default None criteria : str, optional", "---------- y : np.array set of labels Returns ------- float entropy \"\"\" n_labels", "clf: SVC = None, criterion: str = None, feature_select: str = None, criteria:", "the other side Returns ------- float information gain \"\"\" imp_prev = self.criterion_function(labels) card_up", "samples (# samples, # features) labels : np.array labels of the dataset max_features", "the subdataset of the node only if it is a leaf \"\"\" if", "self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Re3turn a subspace", "self._partition_column = col def get_partition_column(self) -> int: return self._partition_column def set_down(self, son): self._down", "up indices Parameters ---------- origin : np.array dataset to split Returns ------- list", "def make_predictor(self): \"\"\"Compute the class of the predictor and its belief based on", "of (m, nc) nc = # classes data = self._distances(node, samples) if data.shape[0]", "optional If standardization of features should be applied on each node with the", "import ConvergenceWarning from mufs import MUFS class Snode: \"\"\" Nodes of the tree", "the label. \"cfs\": Apply Correlation-based Feature Selection. \"fcbf\": Apply Fast Correlation- Based, by", "features of the subspace (< number of features in dataset) Returns ------- tuple", "self._up = data > 0 def part(self, origin: np.array) -> list: \"\"\"Split an", "def get_classifier(self) -> SVC: return self._clf def get_impurity(self) -> float: return self._impurity def", "get_down(self) -> \"Snode\": return self._down def get_up(self) -> \"Snode\": return self._up def make_predictor(self):", "if max_features != num_features). 
Supported criteria are “gini” for the Gini impurity and", "best, mutual, cfs, fcbf} \"\"\" def __init__( self, clf: SVC = None, criterion:", "col] self._up = data > 0 def part(self, origin: np.array) -> list: \"\"\"Split", "time, by default None scaler : StandardScaler, optional scaler used if any, by", "def __iter__(self): # To complete the iterator interface return self def _push(self, node:", "# No feature reduction n_features = dataset.shape[1] if n_features == max_features: return tuple(range(n_features))", "Parameters ---------- features : int number of features in each combination max_features :", "feature_select : str, optional The strategy used to choose the feature set at", "gain Parameters ---------- dataset : np.array array of samples (# samples, # features)", "- Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title}", "selected is not None else feature_set @staticmethod def _generate_spaces(features: int, max_features: int) ->", "_max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset to be taken", "splitter must be in {random, best, mutual, cfs, fcbf} \"\"\" def __init__( self,", "if samples == 0: return 0.0 else: result = ( imp_prev - (card_up", "\"\"\"Re3turn a subspace of the selected dataset of max_features length. 
Depending on hyperparameter", "information with labels Parameters ---------- dataset : np.array array of samples labels :", ": np.array array of labels features_sets : list list of features sets to", "of the features selected by splitter depending on the self._feature_select hyper parameter Parameters", ": np.array set of labels Returns ------- float entropy \"\"\" n_labels = len(y)", "title): self._title = title def set_classifier(self, clf): self._clf = clf def set_features(self, features):", "split candidate Parameters ---------- labels : np.array labels of the dataset labels_up :", "tuple: \"\"\"Correlattion-based feature selection with max_features limit Parameters ---------- dataset : np.array array", "belief based on the subdataset of the node only if it is a", "pseudo random number generation for shuffling the data for probability estimates. Ignored when", "return node class Splitter: \"\"\" Splits a dataset in two based on different", "tuple with the dataset with only the features selected and the indices of", "info with the label. \"cfs\": Apply Correlation-based Feature Selection. 
\"fcbf\": Apply Fast Correlation-", "y : np.array column of dataset to be taken into account to split", "data contains the distances of every sample to every class hyperplane # array", "impurity def get_title(self) -> str: return self._title def get_classifier(self) -> SVC: return self._clf", "---------- node : Snode node containing the svm classifier data : np.ndarray samples", "array \"\"\" down = ~self._up return [ origin[self._up] if any(self._up) else None, origin[down]", "card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is not None: card_dn =", "node: Snode): if node is not None: self._stack.append(node) def __next__(self) -> Snode: if", "proportions: if prop != 0.0: entropy -= prop * log(prop, n_classes) return entropy", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based feature selection with max_features", "by splitter depending on the self._feature_select hyper parameter Parameters ---------- dataset : np.array", "tuple: \"\"\"Re3turn a subspace of the selected dataset of max_features length. Depending on", "labels) node = Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node,", "time - True / Test time - False \"\"\" # data contains the", "= len(y) if n_labels <= 1: return 0 counts = np.bincount(y) proportions =", "feature_select self._normalize = normalize if clf is None: raise ValueError(f\"clf has to be", "gain) of them. \"mutual\": Chooses the best features w.r.t. their mutual info with", "entropy. 
for prop in proportions: if prop != 0.0: entropy -= prop *", "int ) -> tuple: \"\"\"Re3turn a subspace of the selected dataset of max_features", "self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float: _, count = np.unique(y, return_counts=True) return", "f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self,", "across multiple function calls, by default None normalize : bool, optional If standardization", "self, labels: np.array, labels_up: np.array, labels_dn: np.array ) -> float: \"\"\"Compute information gain", "= None, feature_select: str = None, criteria: str = None, min_samples_split: int =", "range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array, labels: np.array,", "limit Parameters ---------- dataset : np.array array of samples labels : np.array labels", "just use the column computed in train time # is taking the classifier", "Gini impurity and “entropy” for the information gain., by default \"entropy\", by default", "max_features : int number of features of the subspace (<= number of features", "of features of the subspace (<= number of features in dataset) Returns -------", "to be taken into account to split dataset Parameters ---------- data : np.array", "node._title, node._sample_weight, node._scaler, ) def set_partition_column(self, col: int): self._partition_column = col def get_partition_column(self)", "multiclass # Convert data to a (m, 1) array selecting values for samples", "np.array array of samples (# samples, # features) labels : np.array labels of", "<= 1: return 0 counts = np.bincount(y) proportions = counts / n_labels n_classes", "np.ndarray input labes in train time features : np.array features used to compute", "0.0: entropy -= prop * log(prop, n_classes) return entropy def information_gain( self, labels:", "data for 
probability estimates. Ignored when probability is False.Pass an int for reproducible", "= mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod", "class hyperplane # array of (m, nc) nc = # classes data =", "selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y:", "set_classifier(self, clf): self._clf = clf def set_features(self, features): self._features = features def set_impurity(self,", ") -> float: \"\"\"Compute information gain of a split candidate Parameters ---------- labels", "aren't enough samples to split self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim >", "def get_up(self) -> \"Snode\": return self._up def make_predictor(self): \"\"\"Compute the class of the", "1: # split criteria for multiclass # Convert data to a (m, 1)", "dataset : np.array array of samples labels : np.array labels of the dataset", "Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack = [] self._push(tree) def", "= 0.0 # Only store dataset in Testing self._X = X if os.environ.get(\"TESTING\",", "criteria are “gini” for the Gini impurity and “entropy” for the information gain.,", "None else feature_set @staticmethod def _generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate at", "of the samples to the hyperplane of the node Parameters ---------- node :", "self._down = None self._up = None self._class = None self._feature = None self._sample_weight", "selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset:", "labels (classes) Returns ------- np.array column of dataset to be taken into account", "the dataset max_features : int number of features of the subspace (<= number", "standardization of features should be 
applied on each node with the samples that", "data.shape[0] < self._min_samples_split: # there aren't enough samples to split self._up = np.ones((data.shape[0]),", "selected @staticmethod def _max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset", "samples that should go to one side of the tree (up) Parameters ----------", "feature_select not in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be", "\"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack = [] self._push(tree) def __iter__(self):", "of the subspace (< number of features in dataset) Returns ------- tuple indices", "features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset: np.array,", "on the other side Returns ------- float information gain \"\"\" imp_prev = self.criterion_function(labels)", "reduction n_features = dataset.shape[1] if n_features == max_features: return tuple(range(n_features)) # select features", "make_predictor(self): \"\"\"Compute the class of the predictor and its belief based on the", "None, ): self._clf = clf self._title = title self._belief = 0.0 # Only", "None min_samples_split : int, optional The minimum number of samples required to split", "to split dataset \"\"\" max_gain = 0 selected = -1 for col in", "best feature set \"\"\" max_gain = 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for", "for any, by default None random_state : optional Controls the pseudo random number", "in train time (only in testing) y : np.ndarray input labes in train", "sklearn estimator ValueError criterion must be gini or entropy ValueError criteria has to", "to be a sklearn estimator ValueError criterion must be gini or entropy ValueError", "max_features: int ) -> tuple: \"\"\"Correlattion-based feature selection with max_features limit Parameters ----------", 
"Snode: \"\"\" Nodes of the tree that keeps the svm classifier and if", "node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def set_partition_column(self, col: int):", "gain selected = feature_set return selected if selected is not None else feature_set", "else None self._y = y self._down = None self._up = None self._class =", "tuple indices of the features selected \"\"\" # return best features with mutual", "col in range(data.shape[1]): tup = y[data[:, col] > 0] tdn = y[data[:, col]", "class \"\"\" import os import warnings import random from math import log, factorial", "None: card_dn = labels_dn.shape[0] if labels_dn is not None else 0 imp_dn =", "count = np.unique(y, return_counts=True) return 1 - np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y:", "n_features == max_features: return tuple(range(n_features)) # select features as selected in constructor return", "max_features : int number of features to form the subspace Returns ------- tuple", "side of the tree (up) Parameters ---------- samples : np.array array of samples", "dataset in a node. 
max_samples is incompatible with 'ovo' multiclass_strategy, by default None", "= ~self._up return [ origin[self._up] if any(self._up) else None, origin[down] if any(down) else", "int ) -> tuple: \"\"\"Return the best of five random feature set combinations", "to split self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1: # split", "Fast Correlation- Based, by default None criteria : str, optional ecides (just in", "information gain., by default \"entropy\", by default None feature_select : str, optional The", "self._title = title self._belief = 0.0 # Only store dataset in Testing self._X", "in dataset) Returns ------- tuple indices of the features selected \"\"\" # Random", "origin: np.array) -> list: \"\"\"Split an array in two based on indices (self._up)", "samples == 0: return 0.0 else: result = ( imp_prev - (card_up /", "node = Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node, train=True)", "sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array, labels:", "train time features : np.array features used to compute hyperplane impurity : float", "Returns ------- tuple indices of the features selected \"\"\" # Random feature reduction", "max_features: return tuple(range(n_features)) # select features as selected in constructor return self.fs_function(dataset, labels,", "impurity): self._impurity = impurity def get_title(self) -> str: return self._title def get_classifier(self) ->", "= features def set_impurity(self, impurity): self._impurity = impurity def get_title(self) -> str: return", "each combination max_features : int number of features in dataset Returns ------- list", "try: self._class = classes[0] except IndexError: self._class = None def __str__(self) -> str:", "Returns ------- tuple indices of the features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset,", "selected = -1 for col in 
range(data.shape[1]): tup = y[data[:, col] > 0]", "imp_up = self.criterion_function(labels_up) if labels_dn is not None: card_dn = labels_dn.shape[0] if labels_dn", "different criteria Parameters ---------- clf : SVC, optional classifier, by default None criterion", "self._y = y self._down = None self._up = None self._class = None self._feature", "\"\"\"Set the criteria to split arrays. Compute the indices of the samples that", "use the column computed in train time # is taking the classifier of", "max_features < num_features). Supported strategies are: “best”: sklearn SelectKBest algorithm is used in", "self._X = X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y = y", "optional scaler used if any, by default None \"\"\" def __init__( self, clf:", "of features of the subspace (< number of features in dataset) Returns -------", "not None: random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split self._criteria = criteria self._feature_select", "subspace (< number of features in dataset) Returns ------- tuple indices of the", "= np.unique(self._y, return_counts=True) if len(classes) > 1: max_card = max(card) self._class = classes[card", "node : Snode node containing the svm classifier data : np.ndarray samples to", "not None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is not None:", "f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset:", "node. 
0 (default) for any, by default None random_state : optional Controls the", "be applied on each node with the samples that reach it , by", "column to take into # account to split the dataset col = self.decision_criteria(data,", "# features) labels : np.array labels of the dataset max_features : int number", "check Returns ------- list best feature set \"\"\" max_gain = 0 selected =", "SVM nodes Splitter class \"\"\" import os import warnings import random from math", "node title : str label describing the route to the node weight :", "indices of the features selected \"\"\" # No feature reduction n_features = dataset.shape[1]", "self, clf: SVC = None, criterion: str = None, feature_select: str = None,", "splitter depending on the self._feature_select hyper parameter Parameters ---------- dataset : np.array array", "of five random feature set combinations Parameters ---------- dataset : np.array array of", "tuple tuple with the dataset with only the features selected and the indices", "default None criteria : str, optional ecides (just in case of a multi", "entropy -= prop * log(prop, n_classes) return entropy def information_gain( self, labels: np.array,", "to be taken into account to split dataset \"\"\" max_gain = 0 selected", "in range(data.shape[1]): tup = y[data[:, col] > 0] tdn = y[data[:, col] <=", "features selected \"\"\" # return best features with mutual info with the label", "of the tree that keeps the svm classifier and if testing the dataset", "if n_features == max_features: return tuple(range(n_features)) # select features as selected in constructor", "Parameters ---------- origin : np.array dataset to split Returns ------- list list with", "return self._title def get_classifier(self) -> SVC: return self._clf def get_impurity(self) -> float: return", "\"\"\" max_gain = 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in features_sets:", "by default None normalize : bool, optional If standardization of 
features should be", "np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float: _, count = np.unique(y,", "features of the subspace (<= number of features in dataset) Returns ------- tuple", "the array \"\"\" down = ~self._up return [ origin[self._up] if any(self._up) else None,", "self._up is None and self._down is None def get_down(self) -> \"Snode\": return self._down", "np.count_nonzero(proportions) if n_classes <= 1: return 0 entropy = 0.0 # Compute standard", "classifier data : np.ndarray samples to compute distance to hyperplane Returns ------- np.array", "MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels: np.array, max_features:", "max_features length. Depending on hyperparameter Parameters ---------- dataset : np.array array of samples", "best features. “random”: The algorithm generates 5 candidates and choose the best (max.", "None self._sample_weight = ( weight if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None )", "column of dataset to be taken into account to split dataset Returns -------", "if len(self._stack) == 0: raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node", "if clf is None: raise ValueError(f\"clf has to be a sklearn estimator, got({clf})\")", "features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\"", "dataset in Testing self._X = X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None", "factorial import numpy as np from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import", "= None self._sample_weight = ( weight if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None", "the distances of every sample to every class hyperplane # array of (m,", "son): self._up = son def is_leaf(self) -> bool: return self._up is 
None and", "@staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute entropy of a labels set Parameters", "self._distances(node, samples) if data.shape[0] < self._min_samples_split: # there aren't enough samples to split", "to 5 combination of features randomly selected \"\"\" comb = set() # Generate", "def _fs_random( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return", "multiclass_strategy, by default None min_samples_split : int, optional The minimum number of samples", "_fs_random( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the", ": str, optional The strategy used to choose the feature set at each", "\"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array,", "sklearn estimator, got({clf})\") if criterion not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion must", "on each node with the samples that reach it , by default False", "the samples that reach it , by default False Raises ------ ValueError clf", "!= num_features). Supported criteria are “gini” for the Gini impurity and “entropy” for", "raise ValueError(f\"clf has to be a sklearn estimator, got({clf})\") if criterion not in", "required to split an internal node. 0 (default) for any, by default None", "info. gain) of them. \"mutual\": Chooses the best features w.r.t. 
their mutual info", "labels: np.array, labels_up: np.array, labels_dn: np.array ) -> float: \"\"\"Compute information gain of", "account to split dataset Returns ------- np.array column of dataset to be taken", "randomly selected \"\"\" comb = set() # Generate at most 5 combinations number", "the iterator interface return self def _push(self, node: Snode): if node is not", "# features) node : Snode Node of the tree where partition is going", "dataset to be taken into account to split dataset \"\"\" max_gain = 0", "= y self._down = None self._up = None self._class = None self._feature =", "\"mutual\": Chooses the best features w.r.t. their mutual info with the label. \"cfs\":", "[\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be in {random, best,", "\"\"\" # Random feature reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return", "< self._min_samples_split: # there aren't enough samples to split self._up = np.ones((data.shape[0]), dtype=bool)", "f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator:", "Parameters ---------- labels : np.array labels of the dataset labels_up : np.array labels", "the information gain Parameters ---------- dataset : np.array array of samples (# samples,", "parameter Parameters ---------- dataset : np.array array of samples labels : np.array labels", "to check Returns ------- list best feature set \"\"\" max_gain = 0 selected", "classifier, by default None criterion : str, optional The function to measure the", "MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array: return", "== max_card][0] self._belief = max_card / np.sum(card) else: self._belief = 1 try: self._class", "mufs import MUFS class Snode: \"\"\" Nodes of the tree that 
keeps the", "assigned to it Parameters ---------- clf : SVC Classifier used X : np.ndarray", ": str, optional The function to measure the quality of a split (only", "max_features: int ) -> tuple: \"\"\"Return the variabes with higher f-score Parameters ----------", "number = factorial(features) / ( factorial(max_features) * factorial(features - max_features) ) set_length =", "( imp_prev - (card_up / samples) * imp_up - (card_dn / samples) *", "output across multiple function calls, by default None normalize : bool, optional If", "= None self._feature = None self._sample_weight = ( weight if os.environ.get(\"TESTING\", \"NS\") !=", "None self._class = None self._feature = None self._sample_weight = ( weight if os.environ.get(\"TESTING\",", "self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\"", "criteria self._feature_select = feature_select self._normalize = normalize if clf is None: raise ValueError(f\"clf", "- np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute entropy of", "selected if selected is not None else feature_set @staticmethod def _generate_spaces(features: int, max_features:", "shuffling the data for probability estimates. 
Ignored when probability is False.Pass an int", "Snode node containing the svm classifier data : np.ndarray samples to compute distance", "tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set( self, dataset: np.array, labels: np.array, max_features:", "used if any, by default None \"\"\" def __init__( self, clf: SVC, X:", "for feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf, dataset, labels,", "the node title : str label describing the route to the node weight", "be in {random, best, mutual, cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function =", "False \"\"\" # data contains the distances of every sample to every class", "feature_set @staticmethod def _generate_spaces(features: int, max_features: int) -> list: \"\"\"Generate at most 5", "class of the predictor and its belief based on the subdataset of the", "( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder iterator\"\"\"", "_, samples = np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples: np.array, node: Snode,", "random.seed(random_state) self._criterion = criterion self._min_samples_split = min_samples_split self._criteria = criteria self._feature_select = feature_select", "self._class = None self._feature = None self._sample_weight = ( weight if os.environ.get(\"TESTING\", \"NS\")", "np.array, max_features: int ) -> tuple: \"\"\"Correlattion-based feature selection with max_features limit Parameters", "def get_impurity(self) -> float: return self._impurity def get_features(self) -> np.array: return self._features def", "impurity of the node title : str label describing the route to the", "return self._partition_column def set_down(self, son): self._down = son def set_title(self, title): self._title =", "max_features: int ) -> tuple: \"\"\"Return the best 
features with mutual information with", "clf : SVC, optional classifier, by default None criterion : str, optional The", "factorial(features - max_features) ) set_length = min(5, number) while len(comb) < set_length: comb.add(", "distances to hyper plane of every class y : np.array column of dataset", "its complement partition has to be called first to establish up indices Parameters", ") -> tuple: \"\"\"Re3turn a subspace of the selected dataset of max_features length.", "dataset : np.array array of samples (# samples, # features) labels : np.array", "for col in range(data.shape[1]): tup = y[data[:, col] > 0] tdn = y[data[:,", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best of", ") @staticmethod def _fs_cfs( dataset: np.array, labels: np.array, max_features: int ) -> tuple:", "of samples (# samples, # features) node : Snode Node of the tree", "into # account to split the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else:", "samples, # features) node : Snode Node of the tree where partition is", "for probability estimates. Ignored when probability is False.Pass an int for reproducible output", "/ Test time - False \"\"\" # data contains the distances of every", "label. \"cfs\": Apply Correlation-based Feature Selection. \"fcbf\": Apply Fast Correlation- Based, by default", "in Testing self._X = X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y", "criteria: str = None, min_samples_split: int = None, random_state=None, normalize=False, ): self._clf =", "the selected dataset of max_features length. 
Depending on hyperparameter Parameters ---------- dataset :", "f\"criterion must be gini or entropy got({criterion})\" ) if criteria not in [", "gini or entropy ValueError criteria has to be max_samples or impurity ValueError splitter", "max number of samples _, samples = np.unique(y, return_counts=True) return np.argmax(samples) def partition(self,", "when probability is False.Pass an int for reproducible output across multiple function calls,", "max_gain: max_gain = gain selected = feature_set return selected if selected is not", "of the features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod", "every node to choose the max_features best features. “random”: The algorithm generates 5", "are “gini” for the Gini impurity and “entropy” for the information gain., by", "class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\"", "entropy of a labels set Parameters ---------- y : np.array set of labels", ": np.array distances to hyper plane of every class y : np.array column", "the self._feature_select hyper parameter Parameters ---------- dataset : np.array array of samples labels", "= None self._up = None self._class = None self._feature = None self._sample_weight =", "the subspace (< number of features in dataset) Returns ------- tuple indices of", ": str, optional ecides (just in case of a multi class classification) which", "y: np.array) -> np.array: \"\"\"return column of dataset to be taken into account", "labels of the dataset labels_up : np.array labels of one side labels_dn :", "features): self._features = features def set_impurity(self, impurity): self._impurity = impurity def get_title(self) ->", "-> list: \"\"\"Return the best set of features among feature_sets, the criterion is", "self._down def get_up(self) -> \"Snode\": return self._up def make_predictor(self): 
\"\"\"Compute the class of", "constructor return self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset: np.array, labels: np.array, max_features:", "by default None feature_select : str, optional The strategy used to choose the", "in constructor return self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset: np.array, labels: np.array,", "= None, scaler: StandardScaler = None, ): self._clf = clf self._title = title", "optional classifier, by default None criterion : str, optional The function to measure", "the best (max. info. gain) of them. \"mutual\": Chooses the best features w.r.t.", "classifier based on SVM nodes Splitter class \"\"\" import os import warnings import", "gain > max_gain: max_gain = gain selected = feature_set return selected if selected", "Based, by default None criteria : str, optional ecides (just in case of", "an internal node. 0 (default) for any, by default None random_state : optional", "two based on indices (self._up) and its complement partition has to be called", "at each node (only used if max_features < num_features). Supported strategies are: “best”:", "list: \"\"\"Generate at most 5 feature random combinations Parameters ---------- features : int", "of every class y : np.array column of dataset to be taken into", "= factorial(features) / ( factorial(max_features) * factorial(features - max_features) ) set_length = min(5,", "of every class. 
nc = # of classes \"\"\" X_transformed = data[:, node._features]", "decision tree classifier based on SVM nodes Splitter class \"\"\" import os import", "X : np.ndarray input dataset in train time (only in testing) y :", "int): self._partition_column = col def get_partition_column(self) -> int: return self._partition_column def set_down(self, son):", "random feature set combinations Parameters ---------- dataset : np.array array of samples labels", "{random, best, mutual, cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\")", "\"cfs\": Apply Correlation-based Feature Selection. \"fcbf\": Apply Fast Correlation- Based, by default None", "be gini or entropy got({criterion})\" ) if criteria not in [ \"max_samples\", \"impurity\",", "the features selected \"\"\" # Random feature reduction n_features = dataset.shape[1] features_sets =", "np.array) -> np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array) -> float: _, count", "= card_dn = imp_up = imp_dn = 0 if labels_up is not None:", "= self.criterion_function(labels_up) if labels_dn is not None: card_dn = labels_dn.shape[0] if labels_dn is", "tuple: \"\"\"Return the best features with mutual information with labels Parameters ---------- dataset", "of the subspace (<= number of features in dataset) Returns ------- tuple indices", "\"NS\") != \"NS\" else None self._y = y self._down = None self._up =", "if prop != 0.0: entropy -= prop * log(prop, n_classes) return entropy def", "-> \"Snode\": return self._down def get_up(self) -> \"Snode\": return self._up def make_predictor(self): \"\"\"Compute", "in dataset) Returns ------- tuple indices of the features selected \"\"\" mufs =", "= np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1: # split criteria for multiclass", "of dataset to be taken into account to split dataset \"\"\" # select", "hyper parameter Parameters ---------- dataset : np.array array of samples labels : np.array", 
"/ samples) * imp_dn ) return result def _select_best_set( self, dataset: np.array, labels:", "raise ValueError( f\"criteria has to be max_samples or impurity; got ({criteria})\" ) if", "\"\"\" # select the class with max number of samples _, samples =", "> 0] tdn = y[data[:, col] <= 0] info_gain = self.information_gain(y, tup, tdn)", "time # is taking the classifier of class <col> col = node.get_partition_column() if", "the hyperplane of every class. nc = # of classes \"\"\" X_transformed =", "sets to check Returns ------- list best feature set \"\"\" max_gain = 0", "def get_partition_column(self) -> int: return self._partition_column def set_down(self, son): self._down = son def", "gain = self.information_gain(labels, y1, y2) if gain > max_gain: max_gain = gain selected", "subspace Returns ------- tuple tuple with the dataset with only the features selected", "time just use the column computed in train time # is taking the", "indices (self._up) and its complement partition has to be called first to establish", "side labels_dn : np.array labels on the other side Returns ------- float information", "hyperplane impurity : float impurity of the node title : str label describing", ": np.ndarray samples to compute distance to hyperplane Returns ------- np.array array of", "------ ValueError clf has to be a sklearn estimator ValueError criterion must be", "(max. info. gain) of them. \"mutual\": Chooses the best features w.r.t. 
their mutual", "labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels: np.array, max_features: int ) -> tuple:", ".fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array, labels: np.array, max_features: int", "max_gain = info_gain return selected @staticmethod def _max_samples(data: np.array, y: np.array) -> np.array:", "max_samples or impurity ValueError splitter must be in {random, best, mutual, cfs, fcbf}", ": np.array vector of labels (classes) Returns ------- np.array column of dataset to", "def get_down(self) -> \"Snode\": return self._down def get_up(self) -> \"Snode\": return self._up def", "labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is not None: card_dn = labels_dn.shape[0] if", "= col max_gain = info_gain return selected @staticmethod def _max_samples(data: np.array, y: np.array)", "dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit time just use", "return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best( dataset: np.array, labels: np.array, max_features: int", "---------- data : np.array distances to hyper plane of every class y :", "= max_card / np.sum(card) else: self._belief = 1 try: self._class = classes[0] except", "we have to compute the column to take into # account to split", "tdn = y[data[:, col] <= 0] info_gain = self.information_gain(y, tup, tdn) if info_gain", "get_features(self) -> np.array: return self._features def set_up(self, son): self._up = son def is_leaf(self)", "------- tuple indices of the features selected \"\"\" # No feature reduction n_features", "labes in train time features : np.array features used to compute hyperplane impurity", "it is a leaf \"\"\" if not self.is_leaf(): return classes, card = np.unique(self._y,", "-> tuple: \"\"\"Re3turn a subspace of the selected dataset of max_features length. 
Depending", "generation for shuffling the data for probability estimates. Ignored when probability is False.Pass", "float: \"\"\"Compute information gain of a split candidate Parameters ---------- labels : np.array", "features in dataset) Returns ------- tuple indices of the features selected \"\"\" #", "in every node to choose the max_features best features. “random”: The algorithm generates", "“gini” for the Gini impurity and “entropy” for the information gain., by default", "the quality of a split (only used if max_features != num_features). Supported criteria", "features randomly selected \"\"\" comb = set() # Generate at most 5 combinations", "impurity: float, title: str, weight: np.ndarray = None, scaler: StandardScaler = None, ):", "labels_up : np.array labels of one side labels_dn : np.array labels on the", "= self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\" Splits a dataset in", "with the samples that reach it , by default False Raises ------ ValueError", "\"\"\" def __init__( self, clf: SVC = None, criterion: str = None, feature_select:", "is the information gain Parameters ---------- dataset : np.array array of samples (#", "selected in constructor return self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset: np.array, labels:", "partition is producing information gain data = np.ones(data.shape) data = data[:, col] self._up", "if gain > max_gain: max_gain = gain selected = feature_set return selected if", "self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\" Splits a dataset in two based", "“entropy” for the information gain., by default \"entropy\", by default None feature_select :", "self.part(labels) gain = self.information_gain(labels, y1, y2) if gain > max_gain: max_gain = gain", "Convert data to a (m, 1) array selecting values for samples if train:", "(only used if max_features != num_features). 
Supported criteria are “gini” for the Gini", "dataset max_features : int number of features to form the subspace Returns -------", "\"subset\" ) self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain = self.information_gain(labels, y1,", "import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.exceptions", "np.array array of labels features_sets : list list of features sets to check", "/ np.sum(card) else: self._belief = 1 try: self._class = classes[0] except IndexError: self._class", "w.r.t. their mutual info with the label. \"cfs\": Apply Correlation-based Feature Selection. \"fcbf\":", "fcbf} \"\"\" def __init__( self, clf: SVC = None, criterion: str = None,", "best set of features among feature_sets, the criterion is the information gain Parameters", "\"\"\"Return the best set of features among feature_sets, the criterion is the information", "max_gain = 0 selected = -1 for col in range(data.shape[1]): tup = y[data[:,", "def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod def _gini(y: np.array) ->", "\"\"\"return column of dataset to be taken into account to split dataset Parameters", "def set_down(self, son): self._down = son def set_title(self, title): self._title = title def", ") return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree", "sample to the hyperplane of every class. 
nc = # of classes \"\"\"", "= None, random_state=None, normalize=False, ): self._clf = clf self._random_state = random_state if random_state", "got \" f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function", "float impurity of the node title : str label describing the route to", "None random_state : optional Controls the pseudo random number generation for shuffling the", "features) labels : np.array array of labels features_sets : list list of features", ": list list of features sets to check Returns ------- list best feature", "* log(prop, n_classes) return entropy def information_gain( self, labels: np.array, labels_up: np.array, labels_dn:", "labels_dn is not None: card_dn = labels_dn.shape[0] if labels_dn is not None else", "selected \"\"\" # No feature reduction n_features = dataset.shape[1] if n_features == max_features:", "dataset to split Returns ------- list list with two splits of the array", "and if testing the dataset assigned to it Parameters ---------- clf : SVC", "np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1: # split criteria for multiclass #", "is None and self._down is None def get_down(self) -> \"Snode\": return self._down def", "sklearn SelectKBest algorithm is used in every node to choose the max_features best", "criterion must be gini or entropy ValueError criteria has to be max_samples or", "the max_features best features. 
“random”: The algorithm generates 5 candidates and choose the", "# data contains the distances of every sample to every class hyperplane #", "self, dataset: np.array, labels: np.array, features_sets: list ) -> list: \"\"\"Return the best", "y2) if gain > max_gain: max_gain = gain selected = feature_set return selected", "in each combination max_features : int number of features in dataset Returns -------", "Nodes of the tree that keeps the svm classifier and if testing the", "\"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be in {random, best, mutual, cfs, fcbf}", "plane of every class y : np.array vector of labels (classes) Returns -------", "node._y) node.set_partition_column(col) else: # in predcit time just use the column computed in", "if labels_dn is not None else 0 imp_dn = self.criterion_function(labels_dn) samples = card_up", "splits of the array \"\"\" down = ~self._up return [ origin[self._up] if any(self._up)", "class Snode: \"\"\" Nodes of the tree that keeps the svm classifier and", "values for samples if train: # in train time we have to compute", "random number generation for shuffling the data for probability estimates. Ignored when probability", "labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain", "dataset max_features : int number of features of the subspace (<= number of", "def __init__( self, clf: SVC = None, criterion: str = None, feature_select: str", "# Compute standard entropy. for prop in proportions: if prop != 0.0: entropy", "for prop in proportions: if prop != 0.0: entropy -= prop * log(prop,", "part(self, origin: np.array) -> list: \"\"\"Split an array in two based on indices", "each node (only used if max_features < num_features). 
Supported strategies are: “best”: sklearn", "- True / Test time - False \"\"\" # data contains the distances", "used to compute hyperplane impurity : float impurity of the node title :", "class Splitter: \"\"\" Splits a dataset in two based on different criteria Parameters", "it , by default False Raises ------ ValueError clf has to be a", "of the tree where partition is going to be made train : bool", "dtype=bool) return if data.ndim > 1: # split criteria for multiclass # Convert", "taken into account to split dataset \"\"\" # select the class with max", "not None else 0 imp_dn = self.criterion_function(labels_dn) samples = card_up + card_dn if", "to split dataset \"\"\" # select the class with max number of samples", "len(classes) > 1: max_card = max(card) self._class = classes[card == max_card][0] self._belief =", "selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices def _impurity(self,", "-> np.array: return self._features def set_up(self, son): self._up = son def is_leaf(self) ->", "self.information_gain(labels, y1, y2) if gain > max_gain: max_gain = gain selected = feature_set", "self._features = features self._impurity = impurity self._partition_column: int = -1 self._scaler = scaler", "the dataset with only the features selected and the indices of the features", "most 5 combinations number = factorial(features) / ( factorial(max_features) * factorial(features - max_features)", "has to be max_samples or impurity; got ({criteria})\" ) if feature_select not in", "hyper plane of every class y : np.array column of dataset to be", "node Parameters ---------- node : Snode node containing the svm classifier data :", "else: self._belief = 1 try: self._class = classes[0] except IndexError: self._class = None", "key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset: np.array, labels: np.array, max_features:", "(default) for any, by default None random_state : optional 
Controls the pseudo random", "col] > 0] tdn = y[data[:, col] <= 0] info_gain = self.information_gain(y, tup,", "self._class = None def __str__(self) -> str: count_values = np.unique(self._y, return_counts=True) if self.is_leaf():", "enough samples to split self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1:", "ConvergenceWarning from mufs import MUFS class Snode: \"\"\" Nodes of the tree that", "mutual info with the label feature_list = mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)),", "of the dataset max_features : int number of features of the subspace (<", "is a leaf \"\"\" if not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True)", "default None random_state : optional Controls the pseudo random number generation for shuffling", "Returns ------- float entropy \"\"\" n_labels = len(y) if n_labels <= 1: return", "labels: np.array, features_sets: list ) -> list: \"\"\"Return the best set of features", "the class with max number of samples _, samples = np.unique(y, return_counts=True) return", "data : np.ndarray samples to compute distance to hyperplane Returns ------- np.array array", "the predictor and its belief based on the subdataset of the node only", "raise StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\" Splits", "( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return", "the pseudo random number generation for shuffling the data for probability estimates. Ignored", "standard entropy. 
for prop in proportions: if prop != 0.0: entropy -= prop", "1: max_card = max(card) self._class = classes[card == max_card][0] self._belief = max_card /", "\"\"\"Compute the class of the predictor and its belief based on the subdataset", ": np.ndarray, optional weights applied to input dataset in train time, by default", "self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels: np.array, max_features: int", "indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices def _impurity(self, data: np.array,", "train: # in train time we have to compute the column to take", "gain of a split candidate Parameters ---------- labels : np.array labels of the", "ValueError(f\"clf has to be a sklearn estimator, got({clf})\") if criterion not in [\"gini\",", "/ n_labels n_classes = np.count_nonzero(proportions) if n_classes <= 1: return 0 entropy =", ") set_length = min(5, number) while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) )", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best features with", ": optional Controls the pseudo random number generation for shuffling the data for", "!= \"NS\" else None ) self._features = features self._impurity = impurity self._partition_column: int", ") self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain = self.information_gain(labels, y1, y2)", "Test time - False \"\"\" # data contains the distances of every sample", "import log, factorial import numpy as np from sklearn.feature_selection import SelectKBest, mutual_info_classif from", "return if data.ndim > 1: # split criteria for multiclass # Convert data", "0.0, \"subset\" ) self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain = self.information_gain(labels,", "node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def 
set_partition_column(self, col: int): self._partition_column =", "time (only in testing) y : np.ndarray input labes in train time features", "def get_features(self) -> np.array: return self._features def set_up(self, son): self._up = son def", "split the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit time", "= min(5, number) while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb)", "self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\" )", "best, mutual, cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria", "node._scaler, ) def set_partition_column(self, col: int): self._partition_column = col def get_partition_column(self) -> int:", "random_state=None, normalize=False, ): self._clf = clf self._random_state = random_state if random_state is not", "dataset to be taken into account to split dataset Returns ------- np.array column", "= title def set_classifier(self, clf): self._clf = clf def set_features(self, features): self._features =", ": int number of features in dataset Returns ------- list list with up", "get_classifier(self) -> SVC: return self._clf def get_impurity(self) -> float: return self._impurity def get_features(self)", "return self._down def get_up(self) -> \"Snode\": return self._up def make_predictor(self): \"\"\"Compute the class", "combinations Parameters ---------- dataset : np.array array of samples labels : np.array labels", "feature_set], labels) node = Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset,", "set \"\"\" max_gain = 0 selected = None warnings.filterwarnings(\"ignore\", category=ConvergenceWarning) for feature_set in", "np.array, max_features: int ) -> tuple: \"\"\"Return the variabes with higher f-score Parameters", "in {random, best, 
mutual, cfs, fcbf} got \" f\"({feature_select})\" ) self.criterion_function = getattr(self,", "in two based on indices (self._up) and its complement partition has to be", "optional weights applied to input dataset in train time, by default None scaler", "of labels (classes) Returns ------- np.array column of dataset to be taken into", "to a (m, 1) array selecting values for samples if train: # in", "partition has to be called first to establish up indices Parameters ---------- origin", "take into # account to split the dataset col = self.decision_criteria(data, node._y) node.set_partition_column(col)", "tuple indices of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset,", "if data.shape[0] < self._min_samples_split: # there aren't enough samples to split self._up =", "] def _distances(self, node: Snode, data: np.ndarray) -> np.array: \"\"\"Compute distances of the", "indices of the features selected \"\"\" # Random feature reduction n_features = dataset.shape[1]", "y1, y2 = self.part(labels) gain = self.information_gain(labels, y1, y2) if gain > max_gain:", "first to establish up indices Parameters ---------- origin : np.array dataset to split", "result def _select_best_set( self, dataset: np.array, labels: np.array, features_sets: list ) -> list:", "return tuple( sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs( dataset:", "np.array labels of the dataset labels_up : np.array labels of one side labels_dn", "of the tree (up) Parameters ---------- samples : np.array array of samples (#", "import numpy as np from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler", "list(comb) def _get_subspaces_set( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple:", "= # of classes \"\"\" X_transformed = data[:, node._features] if self._normalize: X_transformed =", "str, 
optional The strategy used to choose the feature set at each node", "split dataset Returns ------- np.array column of dataset to be taken into account", ", by default False Raises ------ ValueError clf has to be a sklearn", "np.array dataset to split Returns ------- list list with two splits of the", "node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\" Splits a dataset", "contains the distances of every sample to every class hyperplane # array of", "on the self._feature_select hyper parameter Parameters ---------- dataset : np.array array of samples", "self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1: # split criteria for", "array of samples (# samples, # features) node : Snode Node of the", "/ np.sum(count))) @staticmethod def _entropy(y: np.array) -> float: \"\"\"Compute entropy of a labels", "subspace of the selected dataset of max_features length. Depending on hyperparameter Parameters ----------", "criteria has to be max_samples or impurity ValueError splitter must be in {random,", "features self._impurity = impurity self._partition_column: int = -1 self._scaler = scaler @classmethod def", "return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array, labels:", "dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node, train=True) y1, y2 = self.part(labels)", "None criteria : str, optional ecides (just in case of a multi class", "of the features selected \"\"\" # Random feature reduction n_features = dataset.shape[1] features_sets", ") self._features = features self._impurity = impurity self._partition_column: int = -1 self._scaler =", "information_gain( self, labels: np.array, labels_up: np.array, labels_dn: np.array ) -> float: \"\"\"Compute information", "column of dataset to be taken into account to split dataset \"\"\" max_gain", "get_impurity(self) -> float: return 
self._impurity def get_features(self) -> np.array: return self._features def set_up(self,", "\"\"\"Fast Correlation-based Filter algorithm with max_features limit Parameters ---------- dataset : np.array array", "Snode Node of the tree where partition is going to be made train", "criteria : str, optional ecides (just in case of a multi class classification)", "# Random feature reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset,", "is used in every node to choose the max_features best features. “random”: The", "number of features in each combination max_features : int number of features in", ": np.array distances to hyper plane of every class y : np.array vector", "mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod def", "or impurity; got ({criteria})\" ) if feature_select not in [\"random\", \"best\", \"mutual\", \"cfs\",", "to split dataset Returns ------- np.array column of dataset to be taken into", ": np.ndarray input labes in train time features : np.array features used to", "return self.fs_function(dataset, labels, max_features) def get_subspace( self, dataset: np.array, labels: np.array, max_features: int", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Fast Correlation-based Filter algorithm", "-= prop * log(prop, n_classes) return entropy def information_gain( self, labels: np.array, labels_up:", "_select_best_set( self, dataset: np.array, labels: np.array, features_sets: list ) -> list: \"\"\"Return the", "------- list list with two splits of the array \"\"\" down = ~self._up", "min_samples_split: int = None, random_state=None, normalize=False, ): self._clf = clf self._random_state = random_state", "algorithm with max_features limit Parameters ---------- dataset : np.array array of samples labels", "min_samples_split self._criteria = criteria 
self._feature_select = feature_select self._normalize = normalize if clf is", "of a split (only used if max_features != num_features). Supported criteria are “gini”", "np.array array of samples labels : np.array labels of the dataset max_features :", "n_classes <= 1: return 0 entropy = 0.0 # Compute standard entropy. for", "If standardization of features should be applied on each node with the samples", "case of a multi class classification) which column (class) use to split the", "= getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels:", "self._push(tree) def __iter__(self): # To complete the iterator interface return self def _push(self,", "return ( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f} \" f\"counts={count_values}\" )", "int ) -> tuple: \"\"\"Return the variabes with higher f-score Parameters ---------- dataset", "( factorial(max_features) * factorial(features - max_features) ) set_length = min(5, number) while len(comb)", "max_features) def get_subspace( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple:", "# Convert data to a (m, 1) array selecting values for samples if", "of dataset to be taken into account to split dataset Parameters ---------- data", "None, origin[down] if any(down) else None, ] def _distances(self, node: Snode, data: np.ndarray)", "y2 = self.part(labels) gain = self.information_gain(labels, y1, y2) if gain > max_gain: max_gain", "= self.information_gain(y, tup, tdn) if info_gain > max_gain: selected = col max_gain =", "self._feature_select = feature_select self._normalize = normalize if clf is None: raise ValueError(f\"clf has", "if any(self._up) else None, origin[down] if any(down) else None, ] def _distances(self, node:", "return selected if selected is not None else feature_set @staticmethod def _generate_spaces(features: int,", "best (max. info. 
gain) of them. \"mutual\": Chooses the best features w.r.t. their", "self.decision_criteria(data, node._y) node.set_partition_column(col) else: # in predcit time just use the column computed", "== -1: # No partition is producing information gain data = np.ones(data.shape) data", "if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None ) self._features = features self._impurity =", "in case of a multi class classification) which column (class) use to split", "samples to split self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1: #", "Splitter: \"\"\" Splits a dataset in two based on different criteria Parameters ----------", "random combinations Parameters ---------- features : int number of features in each combination", "of dataset to be taken into account to split dataset \"\"\" max_gain =", "or entropy got({criterion})\" ) if criteria not in [ \"max_samples\", \"impurity\", ]: raise", "to be max_samples or impurity; got ({criteria})\" ) if feature_select not in [\"random\",", "partition(self, samples: np.array, node: Snode, train: bool): \"\"\"Set the criteria to split arrays.", "the subspace (<= number of features in dataset) Returns ------- tuple indices of", "Returns ------- list best feature set \"\"\" max_gain = 0 selected = None", "@staticmethod def _gini(y: np.array) -> float: _, count = np.unique(y, return_counts=True) return 1", "<col> col = node.get_partition_column() if col == -1: # No partition is producing", "np.ndarray, optional weights applied to input dataset in train time, by default None", ": np.array labels on the other side Returns ------- float information gain \"\"\"", "copy(cls, node: \"Snode\") -> \"Snode\": return cls( node._clf, node._X, node._y, node._features, node._impurity, node._title,", "-> \"Snode\": return self._up def make_predictor(self): \"\"\"Compute the class of the predictor and", "labels: np.array, max_features: int ) -> tuple: \"\"\"Compute the indices of the features", "def _distances(self, node: 
Snode, data: np.ndarray) -> np.array: \"\"\"Compute distances of the samples", "based on the subdataset of the node only if it is a leaf", "(card_dn / samples) * imp_dn ) return result def _select_best_set( self, dataset: np.array,", "_fs_mutual( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best", "generates 5 candidates and choose the best (max. info. gain) of them. \"mutual\":", "ValueError( f\"criteria has to be max_samples or impurity; got ({criteria})\" ) if feature_select", "# No partition is producing information gain data = np.ones(data.shape) data = data[:,", "fcbf} got \" f\"({feature_select})\" ) self.criterion_function = getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\")", "return [ origin[self._up] if any(self._up) else None, origin[down] if any(down) else None, ]", "tree (up) Parameters ---------- samples : np.array array of samples (# samples, #", "self.information_gain(y, tup, tdn) if info_gain > max_gain: selected = col max_gain = info_gain", "return cls( node._clf, node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def set_partition_column(self,", "np.array, labels_dn: np.array ) -> float: \"\"\"Compute information gain of a split candidate", "/ samples) * imp_up - (card_dn / samples) * imp_dn ) return result", "tree classifier based on SVM nodes Splitter class \"\"\" import os import warnings", "labels, 5e-4).get_results() def partition_impurity(self, y: np.array) -> np.array: return self.criterion_function(y) @staticmethod def _gini(y:", "@staticmethod def _fs_best( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return", "if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y = y self._down = None", "\"NS\" else None self._y = y self._down = None self._up = None self._class", "title self._belief = 0.0 # Only store dataset in Testing self._X = X", "a node. 
max_samples is incompatible with 'ovo' multiclass_strategy, by default None min_samples_split :", "set_up(self, son): self._up = son def is_leaf(self) -> bool: return self._up is None", "reach it , by default False Raises ------ ValueError clf has to be", "int number of features in each combination max_features : int number of features", "number generation for shuffling the data for probability estimates. Ignored when probability is", "\"\"\" # return best features with mutual info with the label feature_list =", "subspace (<= number of features in dataset) Returns ------- tuple indices of the", "tree where partition is going to be made train : bool Train time", "the criteria to split arrays. Compute the indices of the samples that should", "= data > 0 def part(self, origin: np.array) -> list: \"\"\"Split an array", "tree: Snode): self._stack = [] self._push(tree) def __iter__(self): # To complete the iterator", "weight if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None ) self._features = features self._impurity", "node._sample_weight, node._scaler, ) def set_partition_column(self, col: int): self._partition_column = col def get_partition_column(self) ->", "def __init__( self, clf: SVC, X: np.ndarray, y: np.ndarray, features: np.array, impurity: float,", "self._criterion = criterion self._min_samples_split = min_samples_split self._criteria = criteria self._feature_select = feature_select self._normalize", "Compute the indices of the samples that should go to one side of", "prop != 0.0: entropy -= prop * log(prop, n_classes) return entropy def information_gain(", "criterion not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion must be gini or entropy", "column of dataset to be taken into account to split dataset \"\"\" #", "\" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack", "on different criteria Parameters ---------- clf : SVC, optional classifier, by default None", 
"num_features). Supported criteria are “gini” for the Gini impurity and “entropy” for the", "to be a sklearn estimator, got({clf})\") if criterion not in [\"gini\", \"entropy\"]: raise", "features in dataset) Returns ------- tuple indices of the features selected \"\"\" mufs", "the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def", "from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning from", "describing the route to the node weight : np.ndarray, optional weights applied to", "by default None random_state : optional Controls the pseudo random number generation for", "labels) return tuple( sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:] ) @staticmethod def _fs_cfs(", "(<= number of features in dataset) Returns ------- tuple indices of the features", "def set_features(self, features): self._features = features def set_impurity(self, impurity): self._impurity = impurity def", "of class <col> col = node.get_partition_column() if col == -1: # No partition", "samples labels : np.array labels of the dataset max_features : int number of", "-> np.array: \"\"\"return column of dataset to be taken into account to split", "# in train time we have to compute the column to take into", "indices of the samples that should go to one side of the tree", "complete the iterator interface return self def _push(self, node: Snode): if node is", "in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be in {random,", "{random, best, mutual, cfs, fcbf} \"\"\" def __init__( self, clf: SVC = None,", "mutual info with the label. \"cfs\": Apply Correlation-based Feature Selection. 
\"fcbf\": Apply Fast", "------- float entropy \"\"\" n_labels = len(y) if n_labels <= 1: return 0", "classifier of class <col> col = node.get_partition_column() if col == -1: # No", "StandardScaler from sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning from mufs import MUFS", "clf is None: raise ValueError(f\"clf has to be a sklearn estimator, got({clf})\") if", "the dataset in a node. max_samples is incompatible with 'ovo' multiclass_strategy, by default", "features: np.array, impurity: float, title: str, weight: np.ndarray = None, scaler: StandardScaler =", "__init__( self, clf: SVC = None, criterion: str = None, feature_select: str =", "------- tuple indices of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return", "of the features selected \"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices],", "feature_set in features_sets: self._clf.fit(dataset[:, feature_set], labels) node = Snode( self._clf, dataset, labels, feature_set,", "higher f-score Parameters ---------- dataset : np.array array of samples labels : np.array", "number) while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set(", "5 candidates and choose the best (max. info. gain) of them. \"mutual\": Chooses", "return tuple(range(n_features)) # select features as selected in constructor return self.fs_function(dataset, labels, max_features)", "def set_title(self, title): self._title = title def set_classifier(self, clf): self._clf = clf def", "np.array distances to hyper plane of every class y : np.array column of", "labels, features_sets) @staticmethod def _fs_best( dataset: np.array, labels: np.array, max_features: int ) ->", "list: \"\"\"Return the best set of features among feature_sets, the criterion is the", "split (only used if max_features != num_features). 
Supported criteria are “gini” for the", "= gain selected = feature_set return selected if selected is not None else", "feature set combinations Parameters ---------- dataset : np.array array of samples labels :", "factorial(features) / ( factorial(max_features) * factorial(features - max_features) ) set_length = min(5, number)", "\"NS\" else None ) self._features = features self._impurity = impurity self._partition_column: int =", "Returns ------- np.array column of dataset to be taken into account to split", "selected \"\"\" comb = set() # Generate at most 5 combinations number =", "y[data[:, col] <= 0] info_gain = self.information_gain(y, tup, tdn) if info_gain > max_gain:", "Apply Fast Correlation- Based, by default None criteria : str, optional ecides (just", "nc = # of classes \"\"\" X_transformed = data[:, node._features] if self._normalize: X_transformed", "IndexError: self._class = None def __str__(self) -> str: count_values = np.unique(self._y, return_counts=True) if", "column of dataset to be taken into account to split dataset Parameters ----------", "svm classifier and if testing the dataset assigned to it Parameters ---------- clf", "dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Re3turn a subspace of", "self._title def get_classifier(self) -> SVC: return self._clf def get_impurity(self) -> float: return self._impurity", "testing) y : np.ndarray input labes in train time features : np.array features", "\"\"\" indices = self._get_subspaces_set(dataset, labels, max_features) return dataset[:, indices], indices def _impurity(self, data:", "that should go to one side of the tree (up) Parameters ---------- samples", "def _get_subspaces_set( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Compute", "is False.Pass an int for reproducible output across multiple function calls, by default", "from sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning from mufs import MUFS class", "imp_dn 
= self.criterion_function(labels_dn) samples = card_up + card_dn if samples == 0: return", ") -> tuple: \"\"\"Return the best of five random feature set combinations Parameters", "log, factorial import numpy as np from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.preprocessing", "features with mutual information with labels Parameters ---------- dataset : np.array array of", "tuple indices of the features selected \"\"\" # No feature reduction n_features =", "set combinations Parameters ---------- dataset : np.array array of samples labels : np.array", "None, min_samples_split: int = None, random_state=None, normalize=False, ): self._clf = clf self._random_state =", "best of five random feature set combinations Parameters ---------- dataset : np.array array", "dataset with only the features selected and the indices of the features selected", "---------- dataset : np.array array of samples (# samples, # features) labels :", "label feature_list = mutual_info_classif(dataset, labels) return tuple( sorted( range(len(feature_list)), key=lambda sub: feature_list[sub] )[-max_features:]", "Supported strategies are: “best”: sklearn SelectKBest algorithm is used in every node to", "dataset labels_up : np.array labels of one side labels_dn : np.array labels on", "(only used if max_features < num_features). Supported strategies are: “best”: sklearn SelectKBest algorithm", "= getattr(self, f\"_{self._criterion}\") self.decision_criteria = getattr(self, f\"_{self._criteria}\") self.fs_function = getattr(self, f\"_fs_{self._feature_select}\") def _fs_random(", ") -> tuple: \"\"\"Compute the indices of the features selected by splitter depending", "taken into account to split dataset Parameters ---------- data : np.array distances to", "optional The minimum number of samples required to split an internal node. 
0", "y[data[:, col] > 0] tdn = y[data[:, col] <= 0] info_gain = self.information_gain(y,", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Fast Correlation-based Filter algorithm with", "default \"entropy\", by default None feature_select : str, optional The strategy used to", ".get_support(indices=True) ) @staticmethod def _fs_mutual( dataset: np.array, labels: np.array, max_features: int ) ->", "tuple indices of the features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.cfs(dataset,", "cfs, fcbf} \"\"\" def __init__( self, clf: SVC = None, criterion: str =", "def _fs_fcbf( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Fast Correlation-based", "samples to compute distance to hyperplane Returns ------- np.array array of shape (m,", "def get_subspace( self, dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Re3turn", "tuple: \"\"\"Return the best of five random feature set combinations Parameters ---------- dataset", "if it is a leaf \"\"\" if not self.is_leaf(): return classes, card =", "dataset to be taken into account to split dataset Parameters ---------- data :", "SelectKBest algorithm is used in every node to choose the max_features best features.", "default None normalize : bool, optional If standardization of features should be applied", "to be made train : bool Train time - True / Test time", "self._features = features def set_impurity(self, impurity): self._impurity = impurity def get_title(self) -> str:", "ValueError( \"splitter must be in {random, best, mutual, cfs, fcbf} got \" f\"({feature_select})\"", "classifier and if testing the dataset assigned to it Parameters ---------- clf :", "used X : np.ndarray input dataset in train time (only in testing) y", "calls, by default None normalize : bool, optional If standardization of features should", "os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None ) self._features = features self._impurity = 
impurity", "only if it is a leaf \"\"\" if not self.is_leaf(): return classes, card", "a leaf \"\"\" if not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) if", ") if feature_select not in [\"random\", \"best\", \"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter", "features among feature_sets, the criterion is the information gain Parameters ---------- dataset :", ") return result def _select_best_set( self, dataset: np.array, labels: np.array, features_sets: list )", "is not None else feature_set @staticmethod def _generate_spaces(features: int, max_features: int) -> list:", "nc) nc = # classes data = self._distances(node, samples) if data.shape[0] < self._min_samples_split:", "info_gain > max_gain: selected = col max_gain = info_gain return selected @staticmethod def", "1: return 0 counts = np.bincount(y) proportions = counts / n_labels n_classes =", "features selected \"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self,", "Random feature reduction n_features = dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels,", "for the information gain., by default \"entropy\", by default None feature_select : str,", "dataset) Returns ------- tuple indices of the features selected \"\"\" # return best", "hyperplane Returns ------- np.array array of shape (m, nc) with the distances of", "= X if os.environ.get(\"TESTING\", \"NS\") != \"NS\" else None self._y = y self._down", "Snode( self._clf, dataset, labels, feature_set, 0.0, \"subset\" ) self.partition(dataset, node, train=True) y1, y2", "in two based on different criteria Parameters ---------- clf : SVC, optional classifier,", "features in dataset) Returns ------- tuple indices of the features selected \"\"\" return", "features with mutual info with the label feature_list = mutual_info_classif(dataset, labels) return 
tuple(", ") class Siterator: \"\"\"Stree preorder iterator\"\"\" def __init__(self, tree: Snode): self._stack = []", "np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Compute the indices of the", "be a sklearn estimator, got({clf})\") if criterion not in [\"gini\", \"entropy\"]: raise ValueError(", "return classes, card = np.unique(self._y, return_counts=True) if len(classes) > 1: max_card = max(card)", "<= 0] info_gain = self.information_gain(y, tup, tdn) if info_gain > max_gain: selected =", "to be taken into account to split dataset \"\"\" # select the class", "Returns ------- tuple indices of the features selected \"\"\" # return best features", "be max_samples or impurity ValueError splitter must be in {random, best, mutual, cfs,", "-> str: return self._title def get_classifier(self) -> SVC: return self._clf def get_impurity(self) ->", "-> float: return self._impurity def get_features(self) -> np.array: return self._features def set_up(self, son):", "self._belief = 1 try: self._class = classes[0] except IndexError: self._class = None def", "has to be a sklearn estimator ValueError criterion must be gini or entropy", "self._belief = max_card / np.sum(card) else: self._belief = 1 try: self._class = classes[0]", "= getattr(self, f\"_fs_{self._feature_select}\") def _fs_random( self, dataset: np.array, labels: np.array, max_features: int )", "split self._up = np.ones((data.shape[0]), dtype=bool) return if data.ndim > 1: # split criteria", "proportions = counts / n_labels n_classes = np.count_nonzero(proportions) if n_classes <= 1: return", "to compute hyperplane impurity : float impurity of the node title : str", ") @staticmethod def _fs_mutual( dataset: np.array, labels: np.array, max_features: int ) -> tuple:", "labels_up is not None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is", "\"mutual\", \"cfs\", \"fcbf\"]: raise ValueError( \"splitter must be in {random, best, mutual, cfs,", "the best features 
with mutual information with labels Parameters ---------- dataset : np.array", "labels : np.array array of labels features_sets : list list of features sets", "): self._clf = clf self._title = title self._belief = 0.0 # Only store", "route to the node weight : np.ndarray, optional weights applied to input dataset", "is not None: card_up = labels_up.shape[0] imp_up = self.criterion_function(labels_up) if labels_dn is not", "= np.unique(y, return_counts=True) return 1 - np.sum(np.square(count / np.sum(count))) @staticmethod def _entropy(y: np.array)", "def copy(cls, node: \"Snode\") -> \"Snode\": return cls( node._clf, node._X, node._y, node._features, node._impurity,", "if info_gain > max_gain: selected = col max_gain = info_gain return selected @staticmethod", "samples (# samples, # features) node : Snode Node of the tree where", "labels: np.array, max_features: int ) -> tuple: \"\"\"Return the best of five random", "must be in {random, best, mutual, cfs, fcbf} \"\"\" def __init__( self, clf:", "0 (default) for any, by default None random_state : optional Controls the pseudo", "samples = np.unique(y, return_counts=True) return np.argmax(samples) def partition(self, samples: np.array, node: Snode, train:", "Parameters ---------- dataset : np.array array of samples labels : np.array labels of", "card_dn = labels_dn.shape[0] if labels_dn is not None else 0 imp_dn = self.criterion_function(labels_dn)", "Feature Selection. \"fcbf\": Apply Fast Correlation- Based, by default None criteria : str,", "“random”: The algorithm generates 5 candidates and choose the best (max. info. 
gain)", "imp_prev = self.criterion_function(labels) card_up = card_dn = imp_up = imp_dn = 0 if", "if n_classes <= 1: return 0 entropy = 0.0 # Compute standard entropy.", "indices of the features selected \"\"\" return ( SelectKBest(k=max_features) .fit(dataset, labels) .get_support(indices=True) )", "Generate at most 5 combinations number = factorial(features) / ( factorial(max_features) * factorial(features", "if selected is not None else feature_set @staticmethod def _generate_spaces(features: int, max_features: int)", "= self.criterion_function(labels_dn) samples = card_up + card_dn if samples == 0: return 0.0", "with higher f-score Parameters ---------- dataset : np.array array of samples labels :", ": str label describing the route to the node weight : np.ndarray, optional", ": np.array column of dataset to be taken into account to split dataset", "max_features: int ) -> tuple: \"\"\"Fast Correlation-based Filter algorithm with max_features limit Parameters", "each node with the samples that reach it , by default False Raises", "None, criteria: str = None, min_samples_split: int = None, random_state=None, normalize=False, ): self._clf", "if n_labels <= 1: return 0 counts = np.bincount(y) proportions = counts /", "of one side labels_dn : np.array labels on the other side Returns -------", "optional Controls the pseudo random number generation for shuffling the data for probability", "set_down(self, son): self._down = son def set_title(self, title): self._title = title def set_classifier(self,", "-> float: \"\"\"Compute information gain of a split candidate Parameters ---------- labels :", "not in [\"gini\", \"entropy\"]: raise ValueError( f\"criterion must be gini or entropy got({criterion})\"", "None, random_state=None, normalize=False, ): self._clf = clf self._random_state = random_state if random_state is", "counts / n_labels n_classes = np.count_nonzero(proportions) if n_classes <= 1: return 0 entropy", "0.0 # Only store dataset in Testing self._X = X if 
os.environ.get(\"TESTING\", \"NS\")", "(m, nc) with the distances of every sample to the hyperplane of every", "len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features), max_features))) ) return list(comb) def _get_subspaces_set( self, dataset:", "features selected by splitter depending on the self._feature_select hyper parameter Parameters ---------- dataset", "train time, by default None scaler : StandardScaler, optional scaler used if any,", "of features among feature_sets, the criterion is the information gain Parameters ---------- dataset", "compute distance to hyperplane Returns ------- np.array array of shape (m, nc) with", "max_features: int ) -> tuple: \"\"\"Return the best of five random feature set", "node class Splitter: \"\"\" Splits a dataset in two based on different criteria", "= random_state if random_state is not None: random.seed(random_state) self._criterion = criterion self._min_samples_split =", "np.ndarray samples to compute distance to hyperplane Returns ------- np.array array of shape", "len(y) if n_labels <= 1: return 0 counts = np.bincount(y) proportions = counts", "np.ones(data.shape) data = data[:, col] self._up = data > 0 def part(self, origin:", "classes[card == max_card][0] self._belief = max_card / np.sum(card) else: self._belief = 1 try:", "distances to hyper plane of every class y : np.array vector of labels", "a split candidate Parameters ---------- labels : np.array labels of the dataset labels_up", "\"\"\" Nodes of the tree that keeps the svm classifier and if testing", "# in predcit time just use the column computed in train time #", "imp_up = imp_dn = 0 if labels_up is not None: card_up = labels_up.shape[0]", "col] <= 0] info_gain = self.information_gain(y, tup, tdn) if info_gain > max_gain: selected", "optional The strategy used to choose the feature set at each node (only", "None, criterion: str = None, feature_select: str = None, criteria: str = None,", "feature_set, 0.0, \"subset\" ) 
self.partition(dataset, node, train=True) y1, y2 = self.part(labels) gain =", "features w.r.t. their mutual info with the label. \"cfs\": Apply Correlation-based Feature Selection.", "every class y : np.array vector of labels (classes) Returns ------- np.array column", "every class y : np.array column of dataset to be taken into account", "feature_set return selected if selected is not None else feature_set @staticmethod def _generate_spaces(features:", "Classifier used X : np.ndarray input dataset in train time (only in testing)", "of the predictor and its belief based on the subdataset of the node", "if max_features < num_features). Supported strategies are: “best”: sklearn SelectKBest algorithm is used", "bool, optional If standardization of features should be applied on each node with", "select the class with max number of samples _, samples = np.unique(y, return_counts=True)", "taken into account to split dataset Returns ------- np.array column of dataset to", "def _fs_mutual( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Return the", "dataset Returns ------- list list with up to 5 combination of features randomly", "def information_gain( self, labels: np.array, labels_up: np.array, labels_dn: np.array ) -> float: \"\"\"Compute", "probability is False.Pass an int for reproducible output across multiple function calls, by", "min_samples_split : int, optional The minimum number of samples required to split an", "int for reproducible output across multiple function calls, by default None normalize :", "float entropy \"\"\" n_labels = len(y) if n_labels <= 1: return 0 counts", "is None def get_down(self) -> \"Snode\": return self._down def get_up(self) -> \"Snode\": return", "if not self.is_leaf(): return classes, card = np.unique(self._y, return_counts=True) if len(classes) > 1:", "by default None min_samples_split : int, optional The minimum number of samples required", "return ( f\"{self._title} feaures={self._features} impurity=\" 
f\"{self._impurity:.4f} \" f\"counts={count_values}\" ) class Siterator: \"\"\"Stree preorder", "of the features selected \"\"\" # No feature reduction n_features = dataset.shape[1] if", "them. \"mutual\": Chooses the best features w.r.t. their mutual info with the label.", "\"\"\" mufs = MUFS(max_features=max_features, discrete=False) return mufs.fcbf(dataset, labels, 5e-4).get_results() def partition_impurity(self, y: np.array)", "__init__( self, clf: SVC, X: np.ndarray, y: np.ndarray, features: np.array, impurity: float, title:", "dataset in two based on different criteria Parameters ---------- clf : SVC, optional", "default None feature_select : str, optional The strategy used to choose the feature", "_fs_fcbf( dataset: np.array, labels: np.array, max_features: int ) -> tuple: \"\"\"Fast Correlation-based Filter", "account to split dataset \"\"\" # select the class with max number of", "to establish up indices Parameters ---------- origin : np.array dataset to split Returns", "of the dataset labels_up : np.array labels of one side labels_dn : np.array", "clf self._title = title self._belief = 0.0 # Only store dataset in Testing", "function calls, by default None normalize : bool, optional If standardization of features", "node._X, node._y, node._features, node._impurity, node._title, node._sample_weight, node._scaler, ) def set_partition_column(self, col: int): self._partition_column", "@staticmethod def _max_samples(data: np.array, y: np.array) -> np.array: \"\"\"return column of dataset to", "= dataset.shape[1] features_sets = self._generate_spaces(n_features, max_features) return self._select_best_set(dataset, labels, features_sets) @staticmethod def _fs_best(", "with max number of samples _, samples = np.unique(y, return_counts=True) return np.argmax(samples) def", "StopIteration() node = self._stack.pop() self._push(node.get_up()) self._push(node.get_down()) return node class Splitter: \"\"\" Splits a", "the hyperplane of the node Parameters ---------- 
node : Snode node containing the", "int ) -> tuple: \"\"\"Correlattion-based feature selection with max_features limit Parameters ---------- dataset", "# of classes \"\"\" X_transformed = data[:, node._features] if self._normalize: X_transformed = node._scaler.transform(X_transformed)", "side Returns ------- float information gain \"\"\" imp_prev = self.criterion_function(labels) card_up = card_dn", ") -> tuple: \"\"\"Correlattion-based feature selection with max_features limit Parameters ---------- dataset :", "be gini or entropy ValueError criteria has to be max_samples or impurity ValueError", "Returns ------- tuple indices of the features selected \"\"\" # No feature reduction", "StandardScaler = None, ): self._clf = clf self._title = title self._belief = 0.0", "to compute the column to take into # account to split the dataset", "title def set_classifier(self, clf): self._clf = clf def set_features(self, features): self._features = features", "node (only used if max_features < num_features). 
Supported strategies are: “best”: sklearn SelectKBest", "- max_features) ) set_length = min(5, number) while len(comb) < set_length: comb.add( tuple(sorted(random.sample(range(features),", "= # classes data = self._distances(node, samples) if data.shape[0] < self._min_samples_split: # there", "import StandardScaler from sklearn.svm import SVC from sklearn.exceptions import ConvergenceWarning from mufs import", "impurity={self._impurity:.4f} \" f\"counts={count_values}\" ) return ( f\"{self._title} feaures={self._features} impurity=\" f\"{self._impurity:.4f} \" f\"counts={count_values}\" )", "def is_leaf(self) -> bool: return self._up is None and self._down is None def", "None, feature_select: str = None, criteria: str = None, min_samples_split: int = None,", "# array of (m, nc) nc = # classes data = self._distances(node, samples)", "max_features : int number of features of the subspace (< number of features", "np.array set of labels Returns ------- float entropy \"\"\" n_labels = len(y) if", "the best of five random feature set combinations Parameters ---------- dataset : np.array", "Train time - True / Test time - False \"\"\" # data contains", "of every sample to the hyperplane of every class. 
nc = # of", "\"\"\"Generate at most 5 feature random combinations Parameters ---------- features : int number", "= son def set_title(self, title): self._title = title def set_classifier(self, clf): self._clf =", "with up to 5 combination of features randomly selected \"\"\" comb = set()", "None self._y = y self._down = None self._up = None self._class = None", "max_features: int ) -> tuple: \"\"\"Compute the indices of the features selected by", "or entropy ValueError criteria has to be max_samples or impurity ValueError splitter must", ": np.array array of samples (# samples, # features) node : Snode Node", "tdn) if info_gain > max_gain: selected = col max_gain = info_gain return selected", "information gain Parameters ---------- dataset : np.array array of samples (# samples, #", "other side Returns ------- float information gain \"\"\" imp_prev = self.criterion_function(labels) card_up =", "import warnings import random from math import log, factorial import numpy as np", "return_counts=True) if self.is_leaf(): return ( f\"{self._title} - Leaf class={self._class} belief=\" f\"{self._belief: .6f} impurity={self._impurity:.4f}", "self._partition_column: int = -1 self._scaler = scaler @classmethod def copy(cls, node: \"Snode\") ->", "SelectKBest, mutual_info_classif from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.exceptions import", "= self._distances(node, samples) if data.shape[0] < self._min_samples_split: # there aren't enough samples to", "math import log, factorial import numpy as np from sklearn.feature_selection import SelectKBest, mutual_info_classif", "max_features: int ) -> tuple: \"\"\"Re3turn a subspace of the selected dataset of", "return self._up is None and self._down is None def get_down(self) -> \"Snode\": return", "entropy def information_gain( self, labels: np.array, labels_up: np.array, labels_dn: np.array ) -> float:", "max_samples or impurity; got ({criteria})\" ) if feature_select not in [\"random\", 
\"best\", \"mutual\",", "np.array: return self._features def set_up(self, son): self._up = son def is_leaf(self) -> bool:", "discrete=False) return mufs.cfs(dataset, labels).get_results() @staticmethod def _fs_fcbf( dataset: np.array, labels: np.array, max_features: int" ]
[ "make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if row_updated == 1: message_bit = \"1", "('name','bio',) fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add", "= \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\"", "def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if row_updated == 1: message_bit =", "= 'a') if row_updated == 1: message_bit = \"1 session was \" else:", "= \"1 session was \" else: message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s", "( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social media here\"})", "\"1 session was \" else: message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit)", "were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display", "search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated =", "\",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social media here\"}) ) admin.site.register(Speaker,SpeakerAdmin) admin.site.register(Track,TrackAdmin)", "session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = ( (\"General Information", "list_display = ('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',] 
def", "('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if row_updated", "SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',]", "approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}),", "1: message_bit = \"1 session was \" else: message_bit = \"%s session were", "if row_updated == 1: message_bit = \"1 session was \" else: message_bit =", "row_updated == 1: message_bit = \"1 session was \" else: message_bit = \"%s", "list_display = ('name','bio',) fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"),", "approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets", "import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract']", "else: message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s)", "= \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = (", "import admin from app.models import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display =", "as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = ( (\"General Information \",{\"fields\":", "class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social", 
"Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social media here\"}) ) admin.site.register(Speaker,SpeakerAdmin)", "list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions", "session was \" else: message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description", "['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status =", "session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin):", "fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social", "= ['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status", "(\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social media here\"}) )", "['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if row_updated == 1: message_bit", "\"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display =", "= ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if row_updated == 1:", "django.contrib import admin from 
app.models import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display", "= ('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if", "row_updated = queryset.update(status = 'a') if row_updated == 1: message_bit = \"1 session", "* class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract'] list_filter", "TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',)", "actions = ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a') if row_updated ==", "queryset.update(status = 'a') if row_updated == 1: message_bit = \"1 session was \"", "class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract'] list_filter =", "('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated", "\"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = ( (\"General", "\"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\" class", "(\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social media here\"}) ) admin.site.register(Speaker,SpeakerAdmin) admin.site.register(Track,TrackAdmin) admin.site.register(Session,SessionAdmin)", "list_filter = 
('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset): row_updated = queryset.update(status = 'a')", "message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as", "make_approved.short_description = \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets =", "= ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"), \"description\":\"Add social media", "from django.contrib import admin from app.models import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin):", "= ('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions = ['make_approved',] def make_approved(self,request,queryset):", "message_bit = \"1 session was \" else: message_bit = \"%s session were \"%row_updated", "class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields = ['title','abstract'] list_filter = ('track','speaker',) actions =", "\" else: message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark", "= queryset.update(status = 'a') if row_updated == 1: message_bit = \"1 session was", "= ('name','bio',) fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{ \"classes\":(\"collapse\"), \"fields\":(\"twitter\",\"facebook\"),", "SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',) fieldsets = ( (\"General Information \",{\"fields\": (\"name\",\"bio\",)}), (\"Social Media\",{", "== 1: message_bit = \"1 session was \" else: message_bit = \"%s session", "app.models import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class 
SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields =", "from app.models import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',) search_fields", "'a') if row_updated == 1: message_bit = \"1 session was \" else: message_bit", "admin from app.models import * class TrackAdmin(admin.ModelAdmin): list_display=('title','description',) class SessionAdmin(admin.ModelAdmin): list_display = ('title','status',)", "self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description = \"Mark session(s) as approved\" class SpeakerAdmin(admin.ModelAdmin): list_display = ('name','bio',)", "was \" else: message_bit = \"%s session were \"%row_updated self.message_user(request,\"%s approved\"%message_bit) make_approved.short_description =" ]
[ "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid = None", "= True if is_dirty else False actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting", "instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()),", "'was allowed to be set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass", ") def test_set_is_new(self): # Tests the _set_is_new method of the BaseDataObject class test_object", ") test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID value '", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_new = expected actual = test_object.is_new", "the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._modified = expected", "correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the ' '_get_modified method", "Tests the _set_created method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the ' '_del_is_active method as", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests the _get_is_dirty method", "the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for", "passed an is_deleted value ' 'of \"%s\" (%s), but %s was raised instead:\\n'", "getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the '", "- datetimes outside the UNIX epoch, just in case datetime.strptime( '2001-01-01 12:34:56', 
BaseDataObject._data_time_string", "'BaseDataObject.created is expected to use the ' '_del_created method as its deleter-method' )", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new", "__init__ method of the BaseDataObject class # - All we need to do", "item here (BaseDataObject._del_property_name) should # # be changed to None, and the failure", "if passed an is_new value ' 'of \"%s\" (%s), but %s was raised", "leave None in the ' 'underlying storage attribute, but \"%s\" (%s) was '", "deleter-method' ) def testis_dirty(self): # Tests the is_dirty property of the BaseDataObject class", "should return a known ' 'error-message, but the message returned ' 'was not", "(%s) through the property, but \"%s\" (%s) ' 'was returned instead' % (", "self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use the '", "epoch, just in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string", "Assert that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to", "datetime import datetime from uuid import UUID, uuid4 ####################################### # Third-party imports needed", "test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_deleted", "get(cls, *oids, **criteria): pass @classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class", "instead' % ( is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", "LocalSuite.run(results) results.runTime = time.time() - testStartTime PrintTestResults(results) if not 
results.errors and not results.failures:", "# LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular # # dependencies. Avoid if", "# Tests the _del_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active", "# Tests the property_name property of the BaseDataObject class # # - Assert", "in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted else False actual = test_object.is_deleted", "Test all \"good\" values for modified in GoodDateTimes: if type(modified) == datetime: expected", "type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a '", "(is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", ") # # - If property_name is not expected to be publicly deletable,", "Defines unit-tests for the module at hms_core.data_objects. 
\"\"\" ####################################### # Any needed from", "unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in the module # ####################################### class", "with an ' 'underlying None value' ) def test_set_created(self): # Tests the _set_created", "BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use the ' '_set_created method as its", "# BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use the ' #", "allowed to be set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass except", "types (1,2), tuple(), True, False, object(), # - invalid values 'true', '', '1911-01-01", "'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a value of", "should not accept ' '\"%s\" (%s) as valid is_deleted values, but it '", "# # - Assert that the deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel,", "in (int, float): expected = datetime.fromtimestamp(created) elif type(created) == str: expected = datetime.strptime(", "is expected to use the ' '_set_oid method as its setter-method' ) #", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty", "is_deleted = True is_dirty = True is_new = False test_object = BaseDataObjectDerived( oid,", "[ 'true', '', (1,2), tuple() ] GoodDateTimes = [ # - actual datetime", "# ####################################### ####################################### # Module-level Constants # ####################################### LocalSuite = unittest.TestSuite() ####################################### #", "test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 
'BaseDataObject._del_is_deleted should", "it\\'s retrieved from an instance ' 'with an underlying None value' ) def", "in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s)", "'BaseDataObject.oid is expected to use the ' '_get_oid method as its getter-method' )", "type(actual).__name__, ) ) # - Test all \"bad\" values for created in BadDateTimes:", "' '_del_is_active method as its deleter-method' ) def testis_deleted(self): # Tests the is_deleted", "return ' '\"%s\" (%s) through the property, but \"%s\" (%s) ' 'was returned", "' '_get_is_dirty method as its getter-method' ) # - Assert that the setter", "as data_objects from hms_core.data_objects import * ####################################### # Constants for test-methods # #######################################", "import member\" use # ####################################### __all__ = [ # Test-case classes # Child", "import sys import unittest from datetime import datetime from uuid import UUID, uuid4", "\"bad\" values for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not", "None, 'BaseDataObject._del_modified should leave None in the ' 'underlying storage attribute, but \"%s\"", "test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected to return", "timestamp numbers 1234567890, 1234567890.123456, # - strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56',", "expected, '_get_created was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", "actual = test_object.created self.assertEqual( actual, expected, 'Setting created to \"%s\" (%s) should return", "but ' 'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ )", "the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, 
BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the", "BaseDataObjectDerived() expected = 'expected value' test_object._created = expected actual = test_object.created self.assertEquals(actual, expected,", "# # Tests the property_name property of the BaseDataObject class # # -", "datetime.fromtimestamp(created) elif type(created) == str: expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual", "%s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self): # Tests", ") # - Test all \"bad\" values for modified in BadDateTimes: try: test_object._set_modified(modified)", "= False for dirty in (True, False, None): test_object._is_dirty = dirty try: test_object.save()", "LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in the module # #######################################", "test_object.oid self.assertEqual( actual, expected, 'Setting oid to \"%s\" (%s) should return ' '\"%s\"", "values, but it ' 'was allowed to be set' % (is_active, type(is_active).__name__) )", "'BaseDataObject.save did not raise the ' 'expected error while being tested' ) #", "# Create an \"__all__\" list to support # # \"from module import member\"", "# Tests the is_deleted property of the BaseDataObject class # - Assert that", "that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use", "Tests the __init__ method of the BaseDataObject class # - All we need", "'BaseDataObject.is_dirty is expected to use the ' '_get_is_dirty method as its getter-method' )", "self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, 
None) self.assertEquals(test_object._oid, None) #", "tuple() ] GoodDateTimes = [ # - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(),", "'Setting is_new to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_active", "is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the ' '_set_is_new", "its getter-method' # ) # # - If property_name is not expected to", "an is_active value ' 'of \"%s\" (%s), but %s was raised instead:\\n' '", "# Tests the _get_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "of class properties # ################################### def testcreated(self): # Tests the created property of", "= BaseDataObjectDerived() # - Test all \"good\" values for modified in GoodDateTimes: if", "# Tests the modified property of the BaseDataObject class # - Assert that", "property, but \"%s\" ' '(%s) was returned instead.' 
% ( oid, type(oid).__name__, expected,", "= BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave", "type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "known ' 'error-message, but the message returned ' 'was not what was expected'", "# - Assert that the setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset, #", "'_del_oid method as its deleter-method' ) # def testproperty_name(self): # # Tests the", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self): # Tests the _get_is_new method", "invalid values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs = [ # - actual", "BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use the ' '_set_oid method as its setter-method'", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified =", "as its deleter-method' ) def testis_new(self): # Tests the is_new property of the", "= BaseDataObjectDerived() # - Test all \"good\" values for created in GoodDateTimes: if", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_new", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty,", "execute # ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve", "module # ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None,", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( 
BaseDataObject.is_dirty.fdel,", "= datetime.fromtimestamp(created) elif type(created) == str: expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created)", "the deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is", "expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for", "self.assertEqual( actual, expected, 'Setting is_active to \"%s\" (%s) should return ' '\"%s\" (%s)", ") ) def test_set_modified(self): # Tests the _set_modified method of the BaseDataObject class", "(int, float): expected = datetime.fromtimestamp(modified) elif type(modified) == str: expected = datetime.strptime( modified,", "self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid,", "' '_set_created method as its setter-method' ) # - Assert that the deleter", ") def test_set_oid(self): # Tests the _set_oid method of the BaseDataObject class test_object", ") ) def test_del_is_dirty(self): # Tests the _del_is_dirty method of the BaseDataObject class", ") LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module test-cases to execute #", "method as its getter-method' ) # - Assert that the setter is correct:", "is_dirty else False actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\"", "test_object.created self.assertEquals(actual, expected, '_get_created was expected to return \"%s\" (%s), but ' 'returned", "to use the ' '_set_is_dirty method as its setter-method' ) # - Assert", "run directly. 
# ####################################### if __name__ == '__main__': import time results = unittest.TestResult()", "1234567890.123456, # - strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', # - datetimes", "' 'expected error while being tested' ) ################################### # Tests of class properties", "as its deleter-method' ) def testis_dirty(self): # Tests the is_dirty property of the", "method as its deleter-method' ) # def testproperty_name(self): # # Tests the property_name", "test-case and # # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule", "= 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in the", "( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "'with an underlying None value' ) def test_get_oid(self): # Tests the _get_oid method", "(%s) was ' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self):", "created to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_get_is_deleted method as its", "ValueError if passed a modified value of ' '\"%s\" (%s), but %s was", "# - datetimes outside the UNIX epoch, just in case datetime.strptime( '2001-01-01 12:34:56',", "else False actual = test_object.is_active self.assertEqual( actual, expected, 'Setting is_active to \"%s\" (%s)", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should", "# Any needed from __future__ imports # # Create an \"__all__\" list to", 
"type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests the _get_is_dirty method of", "for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty else False actual", ") def test_get_is_active(self): # Tests the _get_is_active method of the BaseDataObject class test_object", "objects should raise TypeError ' 'or ValueError if passed an is_new value '", "error: if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_create, as required", "# Tests the is_active property of the BaseDataObject class # - Assert that", "Assert that the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to", "to use the ' # '_set_property_name method as its setter-method' # ) #", "is expected to use the ' '_del_oid method as its deleter-method' ) #", "passed a created value of ' '\"%s\" (%s), but %s was raised instead:\\n'", "oid, but %s was raised instead:\\n' ' %s' % ( oid, type(oid).__name__, error.__class__.__name__,", "test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in the ' 'underlying storage attribute, but", "the property, but \"%s\" (%s) ' 'was returned instead' % ( is_active, type(is_active).__name__,", "expected to use the ' '_set_oid method as its setter-method' ) # -", ") # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid", "uuid import UUID, uuid4 ####################################### # Third-party imports needed # ####################################### ####################################### #", "is expected to use the ' # '_set_property_name method as its setter-method' #", "to support # # \"from module import member\" use # ####################################### __all__ =", "actual, expected, 'Setting is_deleted to \"%s\" (%s) should return ' '\"%s\" (%s) through", "to _create: 
test_object._is_new = True for dirty in (True, False, None): test_object._is_dirty =", "= BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave", "imports needed # ####################################### ####################################### # Local imports needed # ####################################### from idic.unit_testing", "####################################### # Import the module being tested # ####################################### import hms_core.data_objects as data_objects", "_set_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "'was returned instead' % ( created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) )", "expected to use the ' '_set_is_deleted method as its setter-method' ) # -", "# ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage", "- Test all \"bad\" values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject", "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self): #", "_del_created method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created = 'unexpected value'", "expected' ) except Exception as error: self.fail( 'BaseDataObject.save did not raise the '", "= False is_deleted = True is_dirty = True is_new = False test_object =", "= BaseDataObjectDerived() expected = 'expected value' test_object._oid = expected actual = test_object.oid self.assertEquals(actual,", "the ' '_set_is_active method as its setter-method' ) # - Assert that the", "instead' % ( expected, 
type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests", "# BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_del_property_name method", "\"\"\" ####################################### # Any needed from __future__ imports # # Create an \"__all__\"", "' '_set_oid method as its setter-method' ) # - Assert that the deleter", "error.__class__.__name__, error ) ) def test_set_is_new(self): # Tests the _set_is_new method of the", "def test_del_oid(self): # Tests the _del_oid method of the BaseDataObject class test_object =", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified,", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._created =", "BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the ' '_get_oid method as its getter-method'", "\"%s\" (%s) was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): #", ") def test_get_modified(self): # Tests the _get_modified method of the BaseDataObject class test_object", "self.assertEquals(actual, expected, '_get_is_active was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "accept \"%s\" ' '(%s) as modified values, but it was allowed to '", "####################################### __all__ = [ # Test-case classes # Child test-modules ] ####################################### #", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active,", "correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_set_is_deleted method", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( 
BaseDataObject.modified.fset,", "% ( is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self): # Tests the", "created elif type(created) in (int, float): expected = datetime.fromtimestamp(created) elif type(created) == str:", "the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use the", "test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None in the ' 'underlying storage", "use the ' '_get_oid method as its getter-method' ) # - Assert that", "'_get_created method as its getter-method' ) # - Assert that the setter is", "LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular # # dependencies. Avoid if possible.", "class test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted", "in the module # ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None,", "in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty else False actual = test_object.is_dirty", "False, 1, 0 ] BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple() ] GoodDateTimes", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals(", ") # - Test all \"bad\" values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new)", "use the ' '_set_is_new method as its setter-method' ) # - Assert that", "unittest.TestSuite() ####################################### # Import the module being tested # ####################################### import hms_core.data_objects as", 
"Assert that the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to", "method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid()", "'or ValueError if passed an is_dirty value ' 'of \"%s\" (%s), but %s", "# ####################################### GoodBooleanOrIntEquivalents = [ True, False, 1, 0 ] BadBooleanOrIntEquivalents = [", ") def test_get_is_new(self): # Tests the _get_is_new method of the BaseDataObject class test_object", "test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\" (%s) should return ' '\"%s\"", "'expected value' test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected", "expected to use the ' '_del_oid method as its deleter-method' ) # def", "but it was allowed to ' 'be set' % (modified, type(modified).__name__) ) except", "if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_update, as required by", "# Tests the is_new property of the BaseDataObject class # - Assert that", "'_set_property_name method as its setter-method' # ) # # - If property_name is", "valid is_new values, but it ' 'was allowed to be set' % (is_new,", "Tests the oid property of the BaseDataObject class # - Assert that the", "type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "was ' 'allowed to be set' % (oid, type(oid).__name__) ) except (TypeError, ValueError):", "message returned ' 'was not what was expected' ) except Exception as error:", "'or ValueError if passed an is_new value ' 'of \"%s\" (%s), but %s", "str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 
'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000',", "str: expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual( actual,", "self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a ' 'datetime value if it\\'s retrieved from", "self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_get_is_deleted method as", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for created in", "== '__main__': import time results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime =", "storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__", "%s' % ( modified, type(modified).__name__, error.__class__.__name__, error ) ) def test_set_oid(self): # Tests", "Test all \"bad\" values for oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects", "BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the ' '_del_modified method as its deleter-method'", "= 'expected value' test_object._oid = expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was", "value' test_object._created = expected actual = test_object.created self.assertEquals(actual, expected, '_get_created was expected to", "deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the '", "_testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in the", "# - If 
property_name is not expected to be publicly settable, # #", "'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_dirty value", "expected actual = test_object.modified self.assertEquals(actual, expected, '_get_modified was expected to return \"%s\" (%s),", "deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the '", "def test_del_is_active(self): # Tests the _del_is_active method of the BaseDataObject class test_object =", "self.fail( 'BaseDataObject.save did not raise the ' 'expected error while being tested' )", "return a UUID value ' 'if it\\'s retrieved from an instance with an", "_get_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "the _get_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "TypeError ' 'or ValueError if passed a created value of ' '\"%s\" (%s),", "== str: expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual(", "expected to use the ' '_set_is_active method as its setter-method' ) # -", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified", "testis_deleted(self): # Tests the is_deleted property of the BaseDataObject class # - Assert", "not raise the ' 'expected error while being tested' ) # - Set", "def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict):", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals(", "= 'expected value' test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual, 
expected, '_get_is_active was", "Tests the modified property of the BaseDataObject class # - Assert that the", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected", ") test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected, 'Setting created to \"%s\" (%s)", "type(test_object._modified).__name__ ) ) def test_del_oid(self): # Tests the _del_oid method of the BaseDataObject", "return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod", "actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to return \"%s\" (%s), but", "# Tests the _get_created method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "the _set_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "####################################### __author__ = '<NAME>' __copyright__ = 'Copyright 2018, all rights reserved' __status__ =", "\"__all__\" list to support # # \"from module import member\" use # #######################################", "that the various # setter- and deleter-method calls are operating as # expected.", "oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "an instance ' 'with an underlying None value' ) def test_get_oid(self): # Tests", "actual, expected, 'Setting is_new to \"%s\" (%s) should return ' '\"%s\" (%s) through", "try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid", "# - Assert that the setter is correct: self.assertEqual( 
BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_deleted in", "is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to use the ' '_get_created", "imports needed # ####################################### from idic.unit_testing import * ####################################### # Initialization needed before", "UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000',", "False, 'BaseDataObject._del_is_dirty should leave None in the ' 'underlying storage attribute, but \"%s\"", "values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs = [ # - actual UUID", "( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests the _del_is_new method of", "# happen after member definition. 
# ####################################### ####################################### # Code to execute if", ") ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a ' 'datetime", "12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', # - datetimes outside the UNIX epoch, just", "is expected to use the ' '_get_modified method as its getter-method' ) #", "value' test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to", "'datetime value if it\\'s retrieved from an instance ' 'with an underlying None", "_del_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected value'", "the various # setter- and deleter-method calls are operating as # expected. #", "' # '_del_property_name method as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject )", "oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept ' '\"%s\"", "BaseDataObjectDerived() expected = 'expected value' test_object._oid = expected actual = test_object.oid self.assertEquals(actual, expected,", ") ) def test_get_modified(self): # Tests the _get_modified method of the BaseDataObject class", "before member # # definition can take place # ####################################### ####################################### # Module-level", "was allowed to ' 'be set' % (modified, type(modified).__name__) ) except (TypeError, ValueError):", "None) # - setters oid = uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1]", "definition. 
# ####################################### ####################################### # Code to execute if file is called #", "definition can take place # ####################################### ####################################### # Module-level Constants # ####################################### LocalSuite", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._modified,", "BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None", "as error: self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed", "' 'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests", "the ' '_get_is_active method as its getter-method' ) # - Assert that the", "values, but it ' 'was allowed to be set' % (is_deleted, type(is_deleted).__name__) )", ") except Exception as error: self.fail( 'BaseDataObject.save did not raise the ' 'expected", "= expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected to return \"%s\"", "] GoodDateTimes = [ # - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), #", "# # Create an \"__all__\" list to support # # \"from module import", "' '_del_oid method as its deleter-method' ) # def testproperty_name(self): # # Tests", "'or ValueError if passed an is_active value ' 'of \"%s\" (%s), but %s", "' '_get_is_active method as its getter-method' ) # - Assert that the setter", "Assert that the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to", "object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs = [", "value' test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual, 
expected, '_get_is_active was expected to", "# - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs = [ #", "= [ # Test-case classes # Child test-modules ] ####################################### # Module metadata/dunder-names", "- setters oid = uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active =", "for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept '", "method as its deleter-method' ) def testmodified(self): # Tests the modified property of", "to be set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass except Exception", "\"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted else", "# Tests the _del_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified", "Tests the _get_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "value' test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected to", "(%s), but %s was raised instead:\\n' ' %s' % ( modified, type(modified).__name__, error.__class__.__name__,", "did not raise the ' 'expected error while being tested' ) # -", "correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the ' '_del_is_new method", "is_dirty property of the BaseDataObject class # - Assert that the getter is", "def testproperty_name(self): # # Tests the property_name property of the BaseDataObject class #", "testBaseDataObject ) ) ####################################### # Child-module test-cases to execute # ####################################### # import", "'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 
'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ # - invalid types", "setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the '", "# Initialization needed before member # # definition can take place # #######################################", ") # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active", "test_del_is_new(self): # Tests the _del_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived()", "as error: if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_update, as", "type(is_dirty).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the", "modified = GoodDateTimes[1] is_active = False is_deleted = True is_dirty = True is_new", "but \"%s\" (%s) ' 'was returned instead' % ( created, type(created).__name__, expected, type(expected).__name__,", "testis_new(self): # Tests the is_new property of the BaseDataObject class # - Assert", "'Copyright 2018, all rights reserved' __status__ = 'Development' ####################################### # Standard library imports", "is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests", "setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the '", "the ' '_del_is_deleted method as its deleter-method' ) def testis_dirty(self): # Tests the", "import os import sys import unittest from datetime import 
datetime from uuid import", "delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids, **criteria):", "'Calling _create should return a known ' 'error-message, but the message returned '", "( is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self): # Tests the _set_is_deleted", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents:", "expected = True if is_new else False actual = test_object.is_new self.assertEqual( actual, expected,", "Tests the save method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "12:34:56.123456' ] ####################################### # Code-coverage test-case and # # decorator-methods # ####################################### class", "invalid types (1,2), tuple(), True, False, object(), # - invalid values 'true', '',", "= test_object.oid self.assertEqual( actual, expected, 'Setting oid to \"%s\" (%s) should return '", "Assert that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to", "= BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual,", "expected = True if is_deleted else False actual = test_object.is_deleted self.assertEqual( actual, expected,", "else False actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\" (%s)", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified,", "(oid, type(oid).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", "Module-level Constants # ####################################### LocalSuite = unittest.TestSuite() ####################################### # Import the 
module being", "valid is_active values, but it ' 'was allowed to be set' % (is_active,", "is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_del_is_deleted", "expected actual = test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected to return \"%s\" (%s),", "' 'was returned instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the ' '_set_is_active method", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._modified =", "type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_dirty", "actual, expected, 'Setting modified to \"%s\" (%s) should return ' '\"%s\" (%s) through", "self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in the ' 'underlying storage attribute,", "'00000000000000000000000000000000', ] BadOIDs = [ # - invalid types (1,2), tuple(), True, False,", "raise the ' 'expected error while being tested' ) # - Set things", "time results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time() - testStartTime", "failure message adjusted # # accordingly: # # - Assert that the deleter", "instead:\\n' ' %s' % ( modified, type(modified).__name__, error.__class__.__name__, error ) ) def test_set_oid(self):", "oid property of the BaseDataObject class # - Assert that the getter is", "datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456, # - strings '2001-01-01", "'_set_modified method as its setter-method' ) # - Assert that the deleter is", "testcreated(self): # Tests the 
created property of the BaseDataObject class # - Assert", "correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the ' '_get_is_new method", "type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return", "(%s) as a valid oid, but it was ' 'allowed to be set'", "None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a ' 'datetime value if it\\'s retrieved", "else False actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\" (%s)", "was ' 'found instead' % ( test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self): #", "[ # Test-case classes # Child test-modules ] ####################################### # Module metadata/dunder-names #", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected", "' '_del_is_deleted method as its deleter-method' ) def testis_dirty(self): # Tests the is_dirty", "expected to use the ' '_del_is_dirty method as its deleter-method' ) def testis_new(self):", "\"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new else", "= BaseDataObjectDerived() # - Test all \"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active)", "UNIX epoch, just in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56',", "but %s was raised instead:\\n' ' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for oid", "True, 'BaseDataObject._del_is_active should leave None in the ' 
'underlying storage attribute, but \"%s\"", "as # expected. # - deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active,", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self):", "type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for oid", "to use the ' '_get_is_deleted method as its getter-method' ) # - Assert", "was ' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): #", "try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept ' '\"%s\" (%s) as a", ") def test_set_is_deleted(self): # Tests the _set_is_deleted method of the BaseDataObject class test_object", "instead:\\n' ' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self):", "' 'error-message, but the message returned ' 'was not what was expected' )", "'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_new value", "should raise TypeError ' 'or ValueError if passed a created value of '", "value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in the ' 'underlying", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_new = expected actual", "# Tests the _del_created method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created", "set' % (is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for modified in", "is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept ' '\"%s\"", 
"12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [ #", "- Set things up to force a call to _update: test_object._is_new = False", ") def test_del_oid(self): # Tests the _del_oid method of the BaseDataObject class test_object", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is", "instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self): # Tests", "% ( modified, type(modified).__name__, error.__class__.__name__, error ) ) def test_set_oid(self): # Tests the", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.created.fset,", "actual, type(actual).__name__ ) ) def test_get_is_new(self): # Tests the _get_is_new method of the", "# accordingly: # # - Assert that the deleter is correct: # self.assertEqual(", "expected to use the ' # '_get_property_name method as its getter-method' # )", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_dirty in", "type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return", "# ####################################### ####################################### # Code to execute if file is called # #", "objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods", "expected, 'Setting modified to \"%s\" (%s) should return ' '\"%s\" (%s) through the", "storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__", ") ) def test_del_is_active(self): # Tests the _del_is_active method of 
the BaseDataObject class", "for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new else False actual", "'_get_is_new method as its getter-method' ) # - Assert that the setter is", "1234567890, 1234567890.123456, # - strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', # -", ") ) # - Test all \"bad\" values for modified in BadDateTimes: try:", "the _del_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active,", "that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use", "__init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created,", "# - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is", "an is_new value ' 'of \"%s\" (%s), but %s was raised instead:\\n' '", "'_get_modified was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "Test all \"bad\" values for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects", "publicly deletable, # # the second item here (BaseDataObject._del_property_name) should # # be", "allowed to be set' % (is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass except", "is_new to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "' 'as an oid, but %s was raised instead:\\n' ' %s' % (", "'was returned instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) )", "test_object._is_active, True, 'BaseDataObject._del_is_active should 
leave None in the ' 'underlying storage attribute, but", "' '_del_is_new method as its deleter-method' ) def testmodified(self): # Tests the modified", "ValueError if passed a value of \"%s\" (%s) ' 'as an oid, but", "Standard library imports needed # ####################################### import os import sys import unittest from", "values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept", "it ' 'was allowed to be set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError,", "class # - All we need to do here is prove that the", "test_get_is_new(self): # Tests the _get_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived()", "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self): #", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is", "deleter-method' ) def testis_active(self): # Tests the is_active property of the BaseDataObject class", "# - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage test-case", "its deleter-method' ) # def testproperty_name(self): # # Tests the property_name property of", "BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the ' '_del_oid method as its", "12:34:56.123456' ] GoodOIDs = [ # - actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'),", "should raise TypeError ' 'or ValueError if passed a modified value of '", "but it ' 'was allowed to be set' % (is_new, type(is_new).__name__) ) except", "is_new=None ): BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted, is_dirty, is_new ) def", ") def testis_active(self): # Tests the is_active property of the BaseDataObject class #", "objects should not 
accept ' '\"%s\" (%s) as valid is_active values, but it", "'BaseDataObject.is_dirty is expected to use the ' '_del_is_dirty method as its deleter-method' )", "an \"__all__\" list to support # # \"from module import member\" use #", "def delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids,", "not accept \"%s\" ' '(%s) as created values, but it was allowed to", "expected, '_get_oid was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", "set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "return a ' 'datetime value if it\\'s retrieved from an instance ' 'with", "raise the ' 'expected error while being tested' ) ################################### # Tests of", "'(%s) as modified values, but it was allowed to ' 'be set' %", "is expected to use the ' '_set_is_active method as its setter-method' ) #", "objects should raise TypeError ' 'or ValueError if passed an is_deleted value '", "if passed a created value of ' '\"%s\" (%s), but %s was raised", "\"\"\" Defines unit-tests for the module at hms_core.data_objects. \"\"\" ####################################### # Any needed", "created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "'_get_is_active method as its getter-method' ) # - Assert that the setter is", "if type(oid) == UUID: expected = oid elif type(oid) == str: expected =", "# expected. 
# - deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True)", "% ( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests the _del_is_active method", "in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s)", "should not accept ' '\"%s\" (%s) as valid is_dirty values, but it '", "'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created", "'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def", "datetime: expected = modified elif type(modified) in (int, float): expected = datetime.fromtimestamp(modified) elif", "testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) #######################################", "False, object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] ####################################### #", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected", "happen after member definition. 
# ####################################### ####################################### # Code to execute if file", "'BaseDataObject.save did not raise the ' 'expected error while being tested' ) ###################################", "to None, and the failure message adjusted # # accordingly: # # -", "we need to do here is prove that the various # setter- and", "'_get_created was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in the ' 'underlying", "circular # # dependencies. Avoid if possible. # ####################################### ####################################### # Initialization that", "accordingly: # # - Assert that the setter is correct: # self.assertEqual( #", "' 'with an underlying None value' ) def test_get_is_active(self): # Tests the _get_is_active", "self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_dirty values,", "test_object = BaseDataObjectDerived() # - Set things up to force a call to", "is expected to use the ' '_del_is_new method as its deleter-method' ) def", "that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use", "# Code to execute if file is called # # or run directly.", "deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the '", "metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__ = 'Copyright 2018, all rights reserved'", "'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ # - invalid", "method of the BaseDataObject class test_object = 
BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted()", "changed to None, and the failure message adjusted # # accordingly: # #", "was raised instead:\\n' ' %s' % ( modified, type(modified).__name__, error.__class__.__name__, error ) )", "- Test all \"good\" values for oid in GoodOIDs: if type(oid) == UUID:", "Child-module test-cases to execute # ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### #", "# Code-coverage test-case and # # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace =", "= datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected, 'Setting", "BaseDataObjectDerived() # - Test all \"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected", "is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the ' '_del_is_new method as its", ") ) # - Test all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try:", "12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ),", "# - Test all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail(", "- Test all \"good\" values for modified in GoodDateTimes: if type(modified) == datetime:", "( modified, type(modified).__name__, error.__class__.__name__, error ) ) def test_set_oid(self): # Tests the _set_oid", "the ' '_set_oid method as its setter-method' ) # - Assert that 
the", "'BaseDataObject.is_active is expected to use the ' '_set_is_active method as its setter-method' )", "' '_set_modified method as its setter-method' ) # - Assert that the deleter", "value' ) def test_set_created(self): # Tests the _set_created method of the BaseDataObject class", "'Setting modified to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "@classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests", "datetime, 'BaseDataObject._get_created should return a ' 'datetime value if it\\'s retrieved from an", "BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime,", "# \"from module import member\" use # ####################################### __all__ = [ # Test-case", "objects should not accept ' '\"%s\" (%s) as valid is_deleted values, but it", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.oid.fset,", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid =", "' '\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( modified,", "that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use", "is expected to use the ' '_del_is_dirty method as its deleter-method' ) def", "\"%s\" ' '(%s) as modified values, but it was allowed to ' 'be", "def test_set_is_deleted(self): # Tests the _set_is_deleted method of the BaseDataObject class test_object =", "'expected error while being tested' ) 
################################### # Tests of class properties #", "' '\"%s\" (%s) through the property, but \"%s\" ' '(%s) was returned instead.'", "that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use", "correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use", "ValueError if passed an is_active value ' 'of \"%s\" (%s), but %s was", "# - timestamp numbers 1234567890, 1234567890.123456, # - strings '2001-01-01 12:34:56', '3001-01-01 12:34:56',", "Tests of class properties # ################################### def testcreated(self): # Tests the created property", "# Test-cases in the module # ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None,", "= None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID value ' 'if it\\'s", "'or ValueError if passed a created value of ' '\"%s\" (%s), but %s", "is expected to use the ' '_del_modified method as its deleter-method' ) def", "the getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is", "= 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in the", "'(%s) as created values, but it was allowed to ' 'be set' %", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid,", "actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), 
UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff',", "(%s), but ' 'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__", ") test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a ' 'datetime value", "# - Test all \"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected =", "self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters oid =", "If property_name is not expected to be publicly deletable, # # the second", "- actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254',", "expected to be publicly settable, # # the second item here (BaseDataObject._set_property_name) should", "test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None in", "# # - If property_name is not expected to be publicly settable, #", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True,", "self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in the ' 'underlying storage attribute,", ") def test_del_is_dirty(self): # Tests the _del_is_dirty method of the BaseDataObject class test_object", "' 'was allowed to be set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError):", "'was returned instead' % ( is_new, type(is_new).__name__, expected, 
type(expected).__name__, actual, type(actual).__name__, ) )", "the message returned ' 'was not what was expected' ) except Exception as", "correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_get_is_deleted method", "is expected to use the ' '_get_is_active method as its getter-method' ) #", "expected actual = test_object.created self.assertEquals(actual, expected, '_get_created was expected to return \"%s\" (%s),", "instance with an ' 'underlying None value' ) def test_set_created(self): # Tests the", "None value' ) def test_get_is_active(self): # Tests the _get_is_active method of the BaseDataObject", ") def testis_deleted(self): # Tests the is_deleted property of the BaseDataObject class #", "self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_active", "type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the '", "\"%s\" (%s) ' 'as an oid, but %s was raised instead:\\n' ' %s'", "value' test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected to", "just in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ),", "% (modified, type(modified).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the ' '_get_is_active", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_active = expected actual = test_object.is_active", "to use the ' # 
'_get_property_name method as its getter-method' # ) #", "'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None in the '", "except NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived has not implemented '", "expected to use the ' '_del_is_active method as its deleter-method' ) def testis_deleted(self):", "- Test all \"bad\" values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject", "the modified property of the BaseDataObject class # - Assert that the getter", "unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module test-cases to execute # ####################################### #", "of the BaseDataObject class test_object = BaseDataObjectDerived() # - Set things up to", "all rights reserved' __status__ = 'Development' ####################################### # Standard library imports needed #", "is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the ' '_set_is_active", "retrieved from an instance with an ' 'underlying None value' ) def test_set_created(self):", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active,", "Tests the _set_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "the module at hms_core.data_objects. 
\"\"\" ####################################### # Any needed from __future__ imports #", "raise TypeError ' 'or ValueError if passed an is_deleted value ' 'of \"%s\"", "the ' '_del_oid method as its deleter-method' ) # def testproperty_name(self): # #", "getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use the '", "% ( is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self): # Tests the", "that the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use", "# BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_get_property_name method", "= data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in the module", "' '_get_is_new method as its getter-method' ) # - Assert that the setter", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_deleted in", "the _del_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected", "expected to use the ' '_set_is_new method as its setter-method' ) # -", "'BaseDataObject.oid is expected to use the ' '_set_oid method as its setter-method' )", "# - Test all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected =", "' 'or ValueError if passed an is_new value ' 'of \"%s\" (%s), but", "created property of the BaseDataObject class # - Assert that the getter is", "] BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple() ] GoodDateTimes = [ #", "####################################### # Any needed from __future__ imports # # Create an \"__all__\" list", "' '\"%s\" (%s) as valid is_active values, but it ' 'was allowed to", "def test_get_is_deleted(self): # 
Tests the _get_is_deleted method of the BaseDataObject class test_object =", "' '_update, as required by BaseDataObject' ): self.fail( 'Calling _create should return a", "the property, but \"%s\" ' '(%s) was returned instead.' % ( oid, type(oid).__name__,", "'_update, as required by BaseDataObject' ): self.fail( 'Calling _create should return a known", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should", "Local imports needed # ####################################### from idic.unit_testing import * ####################################### # Initialization needed", "method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active()", "= time.time() - testStartTime PrintTestResults(results) if not results.errors and not results.failures: SaveTestReport(results, 'hms_core.data_objects',", "BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the ' '_set_is_active method as its", "pass except Exception as error: self.fail( 'BaseDataObject objects should raise TypeError ' 'or", "testsave(self): # Tests the save method of the BaseDataObject class test_object = BaseDataObjectDerived()", "' # '_get_property_name method as its getter-method' # ) # # - If", "except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects should raise", "type(oid).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "expected to use the ' '_get_modified method as its getter-method' ) # -", ") self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) 
self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new)", "deleter is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to use the '", "prove that the various # setter- and deleter-method calls are operating as #", "error ) ) def test_set_is_new(self): # Tests the _set_is_new method of the BaseDataObject", "'error-message, but the message returned ' 'was not what was expected' ) except", "# Tests the _get_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "################################### def testcreated(self): # Tests the created property of the BaseDataObject class #", "is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept ' '\"%s\"", ") # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new", "(%s) should return ' '\"%s\" (%s) through the property, but \"%s\" (%s) '", "= BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests the _get_is_deleted", "is_deleted value ' 'of \"%s\" (%s), but %s was raised instead:\\n' ' %s'", "True, False, object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs", "expected to use the ' '_get_is_new method as its getter-method' ) # -", "being tested # ####################################### import hms_core.data_objects as data_objects from hms_core.data_objects import * #######################################", "class test_object = BaseDataObjectDerived() 
test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty", "imports # # Create an \"__all__\" list to support # # \"from module", "error ) ) def testsave(self): # Tests the save method of the BaseDataObject", "retrieved from an instance ' 'with an underlying None value' ) def test_get_oid(self):", "type(modified) == datetime: expected = modified elif type(modified) in (int, float): expected =", "use the ' '_get_created method as its getter-method' ) # - Assert that", "test_set_created(self): # Tests the _set_created method of the BaseDataObject class test_object = BaseDataObjectDerived()", "BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def", "def test_del_is_deleted(self): # Tests the _del_is_deleted method of the BaseDataObject class test_object =", "an ' 'underlying None value' ) def test_set_created(self): # Tests the _set_created method", "second item here (BaseDataObject._del_property_name) should # # be changed to None, and the", "was raised instead:\\n' ' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error ) )", "type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests the _get_is_deleted method of", "that the deleter is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to use", "'BaseDataObject.is_new is expected to use the ' '_get_is_new method as its getter-method' )", "float): expected = datetime.fromtimestamp(modified) elif type(modified) == str: expected = datetime.strptime( modified, BaseDataObject._data_time_string", "is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def 
test_del_created(self): # Tests the _del_created", "to # # happen after member definition. # ####################################### ####################################### # Code to", "'3001-01-01 12:34:56', '1911-01-01 12:34:56', # - datetimes outside the UNIX epoch, just in", "is_dirty = True is_new = False test_object = BaseDataObjectDerived( oid, created, modified, is_active,", "expected = True if is_dirty else False actual = test_object.is_dirty self.assertEqual( actual, expected,", "created, type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self): # Tests the _set_is_active method", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False,", "type(created) == str: expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created", "(int, float): expected = datetime.fromtimestamp(created) elif type(created) == str: expected = datetime.strptime( created,", "the second item here (BaseDataObject._set_property_name) should # # be changed to None, and", "# self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use the", "but it ' 'was allowed to be set' % (is_dirty, type(is_dirty).__name__) ) except", "test_get_created(self): # Tests the _get_created method of the BaseDataObject class test_object = BaseDataObjectDerived()", "test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in the ' 'underlying storage", "\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__,", "the ' '_get_created method as its getter-method' ) # - Assert that the", "attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ )", 
"BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_del_is_dirty method as its deleter-method'", "' 'or ValueError if passed an is_active value ' 'of \"%s\" (%s), but", "here (BaseDataObject._del_property_name) should # # be changed to None, and the failure message", "# BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_set_property_name method", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget,", "% (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "' 'or ValueError if passed a created value of ' '\"%s\" (%s), but", "'\"%s\" (%s) through the property, but \"%s\" (%s) ' 'was returned instead' %", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_new =", "if passed a modified value of ' '\"%s\" (%s), but %s was raised", "place # ####################################### ####################################### # Module-level Constants # ####################################### LocalSuite = unittest.TestSuite() #######################################", "tuple(), True, False, object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ]", "adjusted # # accordingly: # # - Assert that the setter is correct:", "'00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ # - invalid types (1,2),", "deletable, # # the second item here (BaseDataObject._del_property_name) should # # be changed", "actual, type(actual).__name__ ) ) def test_get_modified(self): # Tests the _get_modified method of the", "All we need to do here is prove that the various # setter-", "is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active else False actual =", "was ' 'found 
instead' % ( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): #", ") def testis_dirty(self): # Tests the is_dirty property of the BaseDataObject class #", "to execute # ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to", "datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456, # - strings '2001-01-01 12:34:56', '3001-01-01", "to be set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass except Exception", "' 'of \"%s\" (%s), but %s was raised instead:\\n' ' %s' % (", "'\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( modified, type(modified).__name__,", "test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True, False, 1, 0 ] BadBooleanOrIntEquivalents =", "modified, type(modified).__name__, error.__class__.__name__, error ) ) def test_set_oid(self): # Tests the _set_oid method", "'was allowed to be set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass", "type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "be changed to None, and the failure message adjusted # # accordingly: #", "BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the ' '_del_oid method as its deleter-method'", "def test_set_is_new(self): # Tests the _set_is_new method of the BaseDataObject class test_object =", "except Exception as error: self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError", "to use the ' '_get_is_dirty method as its getter-method' ) # - Assert", "if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_create, as required by", "_set_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "= test_object.is_deleted self.assertEquals(actual, expected, 
'_get_is_deleted was expected to return \"%s\" (%s), but '", "class testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods # ################################### def test__init__(self): #", "the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_new = expected", "self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to use the ' '_del_created method as", "'BaseDataObject._del_oid should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "use the ' '_del_is_active method as its deleter-method' ) def testis_deleted(self): # Tests", "use the ' '_del_is_deleted method as its deleter-method' ) def testis_dirty(self): # Tests", "type(actual).__name__, ) ) # - Test all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents:", "expected = datetime.fromtimestamp(modified) elif type(modified) == str: expected = datetime.strptime( modified, BaseDataObject._data_time_string )", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted =", "to use the ' '_del_is_active method as its deleter-method' ) def testis_deleted(self): #", "Tests the _del_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid =", "BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted, is_dirty, is_new ) def _create(self): return", "use the ' '_set_is_active method as its setter-method' ) # - Assert that", "def testoid(self): # Tests the oid property of the BaseDataObject class # -", "expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected,", "instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests the _del_is_dirty", "the ' 
'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' %", "= True if is_active else False actual = test_object.is_active self.assertEqual( actual, expected, 'Setting", "'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in", ") # # - If property_name is not expected to be publicly settable,", "####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage )", "= True for dirty in (True, False, None): test_object._is_dirty = dirty try: test_object.save()", "actual, type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a", "%s' % ( created, type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self): # Tests", "created value of ' '\"%s\" (%s), but %s was raised instead:\\n' ' %s'", "(%s) through the property, but \"%s\" ' '(%s) was returned instead.' 
% (", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created", "results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time() - testStartTime PrintTestResults(results)", "to use the ' '_del_modified method as its deleter-method' ) def testoid(self): #", "return a known ' 'error-message, but the message returned ' 'was not what", "test_object = BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should", "'with an underlying None value' ) def test_get_is_active(self): # Tests the _get_is_active method", "\"%s\" (%s) ' 'was returned instead' % ( created, type(created).__name__, expected, type(expected).__name__, actual,", "uuid4 ####################################### # Third-party imports needed # ####################################### ####################################### # Local imports needed", "to use the ' '_get_is_active method as its getter-method' ) # - Assert", "raise TypeError ' 'or ValueError if passed a value of \"%s\" (%s) '", "self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None in the ' 'underlying storage attribute,", "= BaseDataObjectDerived() # - Test all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted)", "that the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use", "'expected value' test_object._oid = expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was expected", "'or ValueError if passed a modified value of ' '\"%s\" (%s), but %s", "accept ' '\"%s\" (%s) as valid is_new values, but it ' 'was allowed", "Child test-modules ] ####################################### # Module 
metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted = expected actual = test_object.is_deleted", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for created in GoodDateTimes:", "type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return", "- Test all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject", "# Imports to resolve circular # # dependencies. Avoid if possible. # #######################################", "datetimes outside the UNIX epoch, just in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ),", "'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None in the '", "= test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to \"%s\" (%s) should return '", "strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', # - datetimes outside the UNIX", "( test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self): # Tests the _del_oid method of", "directly. 
# ####################################### if __name__ == '__main__': import time results = unittest.TestResult() testStartTime", "the failure message adjusted # # accordingly: # # - Assert that the", "deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module test-cases to", "= expected actual = test_object.created self.assertEquals(actual, expected, '_get_created was expected to return \"%s\"", "[ # - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers", "actual, type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests the _get_is_dirty method of the", "in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new else False actual = test_object.is_new", "' 'with an underlying None value' ) def test_get_oid(self): # Tests the _get_oid", "that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_new", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is", "\"%s\" (%s) ' 'was returned instead' % ( modified, type(modified).__name__, expected, type(expected).__name__, actual,", "as its setter-method' ) # - Assert that the deleter is correct: self.assertEqual(", "is_dirty, is_new ) def _create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self,", "the is_new property of the BaseDataObject class # - Assert that the getter", "# - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890,", ") # - Assert that the deleter is 
correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created", "# the second item here (BaseDataObject._del_property_name) should # # be changed to None,", "value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None in the ' 'underlying", "is expected to use the ' '_set_is_deleted method as its setter-method' ) #", "self.assertEquals(actual, expected, '_get_oid was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "type(modified) == str: expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified", "type(oid) == str: expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, expected,", "Avoid if possible. # ####################################### ####################################### # Initialization that needs to # #", "but \"%s\" (%s) was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self):", "self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as modified values, but", "= 'Development' ####################################### # Standard library imports needed # ####################################### import os import", "BadDateTimes = [ # - invalid types (1,2), tuple(), True, False, object(), #", "# decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests(", "was returned instead.' 
% ( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) )", "# - Test all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected =", "self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a value", "is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use the ' '_set_oid", "' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the _del_modified", "all \"good\" values for created in GoodDateTimes: if type(created) == datetime: expected =", "# Tests the __init__ method of the BaseDataObject class # - All we", "correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use", "through the property, but \"%s\" (%s) ' 'was returned instead' % ( is_dirty,", "through the property, but \"%s\" (%s) ' 'was returned instead' % ( modified,", "import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular # # dependencies.", "the ' '_set_is_deleted method as its setter-method' ) # - Assert that the", "support # # \"from module import member\" use # ####################################### __all__ = [", "import * ####################################### # Initialization needed before member # # definition can take", "passed an is_new value ' 'of \"%s\" (%s), but %s was raised instead:\\n'", "modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted,", "BaseDataObject class # - All we need to do here is prove that", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._modified = expected actual = test_object.modified", "# - Test all \"bad\" 
values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail(", "# Standard library imports needed # ####################################### import os import sys import unittest", "# - actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings", "deleter-method' ) def testis_new(self): # Tests the is_new property of the BaseDataObject class", "[ True, False, 1, 0 ] BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple()", "testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in the module # ####################################### class BaseDataObjectDerived(BaseDataObject):", "created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created, modified, is_active,", "property, but \"%s\" (%s) ' 'was returned instead' % ( created, type(created).__name__, expected,", "# # happen after member definition. 
# ####################################### ####################################### # Code to execute", "a created value of ' '\"%s\" (%s), but %s was raised instead:\\n' '", "test-modules ] ####################################### # Module metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__ =", "testmodified(self): # Tests the modified property of the BaseDataObject class # - Assert", "all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should", "Constants # ####################################### LocalSuite = unittest.TestSuite() ####################################### # Import the module being tested", "returned instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", "# ################################### def testcreated(self): # Tests the created property of the BaseDataObject class", "is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self): # Tests the _set_is_new method", "_set_created method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "at hms_core.data_objects. 
\"\"\" ####################################### # Any needed from __future__ imports # # Create", "####################################### LocalSuite = unittest.TestSuite() ####################################### # Import the module being tested # #######################################", ") test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected, 'Setting modified to \"%s\" (%s)", "is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new,", "BaseDataObjectDerived() # - Test all \"good\" values for created in GoodDateTimes: if type(created)", "True if is_dirty else False actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty", "modified values, but it was allowed to ' 'be set' % (modified, type(modified).__name__)", "def test_set_is_active(self): # Tests the _set_is_active method of the BaseDataObject class test_object =", "BaseDataObjectDerived() # - Test all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected", "BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the ' '_del_is_active method as its deleter-method'", "the ' '_get_oid method as its getter-method' ) # - Assert that the", "LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module test-cases to execute # #######################################", "the _set_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "(%s) ' 'was returned instead' % ( created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__,", "'of \"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( 
is_active,", "), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [ # - invalid", "class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) )", "def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid,", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self): # Tests the _get_modified", "the setter is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use the", "# the second item here (BaseDataObject._set_property_name) should # # be changed to None,", "passed an is_dirty value ' 'of \"%s\" (%s), but %s was raised instead:\\n'", "datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456, # -", "expected = created elif type(created) in (int, float): expected = datetime.fromtimestamp(created) elif type(created)", "# Tests the created property of the BaseDataObject class # - Assert that", "def test_get_is_dirty(self): # Tests the _get_is_dirty method of the BaseDataObject class test_object =", "test_object._set_is_deleted(is_deleted) expected = True if is_deleted else False actual = test_object.is_deleted self.assertEqual( actual,", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for created", "class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid", "but \"%s\" (%s) ' 'was returned instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__,", "is correct: self.assertEqual( 
BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_set_is_deleted", "the _del_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected", "'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests the", "value ' 'of \"%s\" (%s), but %s was raised instead:\\n' ' %s' %", "is_active values, but it ' 'was allowed to be set' % (is_active, type(is_active).__name__)", "\"%s\" ' '(%s) was returned instead.' % ( oid, type(oid).__name__, expected, type(expected).__name__, actual,", "to force a call to _update: test_object._is_new = False for dirty in (True,", "as error: if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_create, as", "all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted", "test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in", "an instance with an ' 'underlying None value' ) def test_set_created(self): # Tests", "the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_deleted,", "# ####################################### ####################################### # Local imports needed # ####################################### from idic.unit_testing import *", "\"%s\" (%s) ' 'was returned instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual,", "self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the ' '_set_modified method as", "type(created) == datetime: expected 
= created elif type(created) in (int, float): expected =", "'BaseDataObject._get_modified should return a ' 'datetime value if it\\'s retrieved from an instance", "'true', '', '1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage test-case and # # decorator-methods", "' %s' % ( created, type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self): #", "testis_dirty(self): # Tests the is_dirty property of the BaseDataObject class # - Assert", "test_object = BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid)", "is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_del_is_dirty", "# # - If property_name is not expected to be publicly deletable, #", "valid is_deleted values, but it ' 'was allowed to be set' % (is_deleted,", "for oid in GoodOIDs: if type(oid) == UUID: expected = oid elif type(oid)", "from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids, **criteria): pass @classmethod def sort(cls, objects,", "None): test_object._is_dirty = dirty try: test_object.save() except NotImplementedError as error: if str(error) !=", "BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as", "to be publicly settable, # # the second item here (BaseDataObject._set_property_name) should #", "required by BaseDataObject' ): self.fail( 'Calling _create should return a known ' 'error-message,", ") ################################### # Tests of class properties # ################################### def testcreated(self): # Tests", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected", "GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False is_deleted = True 
is_dirty = True", "self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use the '", "library imports needed # ####################################### import os import sys import unittest from datetime", "# Tests the _set_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() #", ") ) def test_del_oid(self): # Tests the _del_oid method of the BaseDataObject class", "is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the ' '_get_is_new", ") ) def test_set_is_new(self): # Tests the _set_is_new method of the BaseDataObject class", "False test_object = BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid,", "False for dirty in (True, False, None): test_object._is_dirty = dirty try: test_object.save() except", "'_del_is_active method as its deleter-method' ) def testis_deleted(self): # Tests the is_deleted property", "but %s was raised instead:\\n' ' %s' % ( created, type(created).__name__, error.__class__.__name__, error", ") def test_get_is_deleted(self): # Tests the _get_is_deleted method of the BaseDataObject class test_object", "method as its deleter-method' ) def testis_new(self): # Tests the is_new property of", "= None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a ' 'datetime value if it\\'s", "BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_del_is_dirty method as its", "% ( created, type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self): # Tests the", "values for oid in GoodOIDs: if type(oid) == UUID: expected = oid elif", "# # - Assert that the setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset,", "the BaseDataObject class 
test_object = BaseDataObjectDerived() expected = 'expected value' test_object._oid = expected", "property_name is not expected to be publicly deletable, # # the second item", "test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected to return", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is", "that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use", "# Tests the _set_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() #", ") def testmodified(self): # Tests the modified property of the BaseDataObject class #", "GoodBooleanOrIntEquivalents = [ True, False, 1, 0 ] BadBooleanOrIntEquivalents = [ 'true', '',", "self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None in the ' 'underlying storage attribute,", "the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty = expected", "# ####################################### LocalSuite = unittest.TestSuite() ####################################### # Import the module being tested #", ") def testsave(self): # Tests the save method of the BaseDataObject class test_object", "is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted, is_dirty,", "####################################### # Code-coverage test-case and # # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace", "instead' % ( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", "self.assertEquals(test_object._oid, None) # - setters oid = uuid4() created = GoodDateTimes[0] 
modified =", "*oids, **criteria): pass @classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase):", "its deleter-method' ) def testis_active(self): # Tests the is_active property of the BaseDataObject", "oid, created, modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active,", "to use the ' '_get_is_new method as its getter-method' ) # - Assert", "the _set_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "'Setting is_active to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456, #", "\"%s\" (%s) was ' 'found instead' % ( test_object._modified, type(test_object._modified).__name__ ) ) def", "Exception as error: self.fail( 'BaseDataObject.save did not raise the ' 'expected error while", "\"bad\" values for oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not", "type(modified).__name__, error.__class__.__name__, error ) ) def test_set_oid(self): # Tests the _set_oid method of", "try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid", "self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use the '", "####################################### # Local imports needed # ####################################### from idic.unit_testing import * ####################################### #", "(%s) ' 'was returned instead' % ( is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__,", "' # 
'_set_property_name method as its setter-method' # ) # # - If", "that the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use", "test_object._oid = expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to return", "expected = True if is_active else False actual = test_object.is_active self.assertEqual( actual, expected,", "oid, type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self): # Tests the save method", "# # definition can take place # ####################################### ####################################### # Module-level Constants #", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for modified", "None value' ) def test_set_created(self): # Tests the _set_created method of the BaseDataObject", "' 'expected error while being tested' ) # - Set things up to", "expected, '_get_is_active was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", "type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for modified", "'BaseDataObject._del_is_new should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "(is_new, type(is_new).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", "'BaseDataObject.is_deleted is expected to use the ' '_get_is_deleted method as its getter-method' )", "= BaseDataObjectDerived() # - Test all \"good\" values for oid in GoodOIDs: if", "is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty,", "test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_dirty", "expected, 'Setting oid to \"%s\" 
(%s) should return ' '\"%s\" (%s) through the", "NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_update,", "values 'true', '', '1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage test-case and # #", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty,", "as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module", "but the message returned ' 'was not what was expected' ) except Exception", "class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new", "%s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self): # Tests", "instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self):", "= [ # - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp", "= created elif type(created) in (int, float): expected = datetime.fromtimestamp(created) elif type(created) ==", "'_set_is_new method as its setter-method' ) # - Assert that the deleter is", ") def test_set_is_dirty(self): # Tests the _set_is_dirty method of the BaseDataObject class test_object", "is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the ' '_get_modified", "property_name property of the BaseDataObject class # # - Assert that the getter", "created, modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active)", "BadDateTimes: try: 
test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as", "properties # ################################### def testcreated(self): # Tests the created property of the BaseDataObject", "test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in", "TypeError ' 'or ValueError if passed an is_active value ' 'of \"%s\" (%s),", "type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "TypeError ' 'or ValueError if passed a value of \"%s\" (%s) ' 'as", "it ' 'was allowed to be set' % (is_active, type(is_active).__name__) ) except (TypeError,", "BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual, expected,", ") ) def test_set_is_active(self): # Tests the _set_is_active method of the BaseDataObject class", "created values, but it was allowed to ' 'be set' % (created, type(created).__name__)", "- Test all \"bad\" values for oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject", "'be set' % (created, type(created).__name__) ) except (TypeError, ValueError): pass except Exception as", "testis_active(self): # Tests the is_active property of the BaseDataObject class # - Assert", "is expected to use the ' '_get_is_dirty method as its getter-method' ) #", "deleter-method calls are operating as # expected. 
# - deleters first test_object =", "is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "(is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", "is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to use the ' '_del_created", "def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of", "error.__class__.__name__, error ) ) def test_set_is_dirty(self): # Tests the _set_is_dirty method of the", "BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_del_is_deleted method as its deleter-method'", "through the property, but \"%s\" (%s) ' 'was returned instead' % ( created,", "test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None in", "test_del_modified(self): # Tests the _del_modified method of the BaseDataObject class test_object = BaseDataObjectDerived()", "test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as created values,", "a call to _update: test_object._is_new = False for dirty in (True, False, None):", "expected to use the ' # '_del_property_name method as its deleter-method' # )", "= BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave", "should return ' '\"%s\" (%s) through the property, but \"%s\" ' '(%s) was", "passed a modified value of ' '\"%s\" (%s), but %s was raised instead:\\n'", "value of ' '\"%s\" (%s), but %s was raised instead:\\n' ' %s' %", 
"expected actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected to return \"%s\" (%s),", "BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_del_is_deleted method as its", "GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active else False actual = test_object.is_active self.assertEqual(", "- Test all \"bad\" values for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is", "but %s was raised instead:\\n' ' %s' % ( oid, type(oid).__name__, error.__class__.__name__, error", "values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept", "Test-case classes # Child test-modules ] ####################################### # Module metadata/dunder-names # ####################################### __author__", "testproperty_name(self): # # Tests the property_name property of the BaseDataObject class # #", "that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use", "' '_set_is_active method as its setter-method' ) # - Assert that the deleter", "for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active else False actual", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None,", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self):", "False actual = test_object.is_active self.assertEqual( actual, expected, 'Setting is_active to \"%s\" (%s) 
should", "created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self):", "use the ' '_set_created method as its setter-method' ) # - Assert that", "None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID value ' 'if it\\'s retrieved", "objects should raise TypeError ' 'or ValueError if passed a modified value of", "actual, expected, 'Setting oid to \"%s\" (%s) should return ' '\"%s\" (%s) through", "correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the ' '_get_oid method", "% (is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "test_object._set_is_active(is_active) expected = True if is_active else False actual = test_object.is_active self.assertEqual( actual,", "'BaseDataObject.modified is expected to use the ' '_set_modified method as its setter-method' )", "instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests the _del_is_deleted", "getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to use the '", "Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True, False, 1, 0 ]", "BaseDataObjectDerived() expected = 'expected value' test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual, expected,", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.created.fdel, 
BaseDataObject._del_created,", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created,", "' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self): #", "BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the ' '_get_is_active method as its getter-method'", "' 'was returned instead' % ( is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as modified", "(%s), but %s was raised instead:\\n' ' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__,", "def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls,", "_set_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "'BaseDataObjectDerived has not implemented ' '_update, as required by BaseDataObject' ): self.fail( 'Calling", "deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the '", "the _del_created method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created = 'unexpected", "is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the ' '_del_is_active", "expected to use the ' '_set_modified method as its setter-method' ) # -", "If property_name is not expected to be publicly settable, # # the second", "(test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the _del_modified method of the BaseDataObject", "elif type(created) in (int, float): expected = datetime.fromtimestamp(created) elif type(created) == str: 
expected", "**criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass", "modified value of ' '\"%s\" (%s), but %s was raised instead:\\n' ' %s'", "BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the ' '_del_is_new method as its deleter-method'", "if is_deleted else False actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to", "be publicly deletable, # # the second item here (BaseDataObject._del_property_name) should # #", "def test_set_modified(self): # Tests the _set_modified method of the BaseDataObject class test_object =", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._created", "expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, expected, 'Setting oid to", "use the ' '_set_oid method as its setter-method' ) # - Assert that", "the ' '_del_is_dirty method as its deleter-method' ) def testis_new(self): # Tests the", "# - If property_name is not expected to be publicly deletable, # #", "value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None in the ' 'underlying", "instead.' 
% ( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", "is expected to use the ' # '_get_property_name method as its getter-method' #", "an is_deleted value ' 'of \"%s\" (%s), but %s was raised instead:\\n' '", "_update: test_object._is_new = False for dirty in (True, False, None): test_object._is_dirty = dirty", ") # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified", "for modified in GoodDateTimes: if type(modified) == datetime: expected = modified elif type(modified)", "'_del_is_dirty method as its deleter-method' ) def testis_new(self): # Tests the is_new property", "expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected,", "* ####################################### # Initialization needed before member # # definition can take place", "'Development' ####################################### # Standard library imports needed # ####################################### import os import sys", "None in the ' 'underlying storage attribute, but \"%s\" (%s) was ' 'found", "' '(%s) was returned instead.' 
% ( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__,", "data_dict): pass @classmethod def get(cls, *oids, **criteria): pass @classmethod def sort(cls, objects, sort_by):", "to ' 'be set' % (created, type(created).__name__) ) except (TypeError, ValueError): pass except", "as required by BaseDataObject' ): self.fail( 'Calling _create should return a known '", "is not expected to be publicly deletable, # # the second item here", "\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_active, type(is_active).__name__,", "BaseDataObjectDerived() test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None", "# definition can take place # ####################################### ####################################### # Module-level Constants # #######################################", "datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes =", "Test all \"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if", "setter-method' # ) # # - If property_name is not expected to be", "dirty in (True, False, None): test_object._is_dirty = dirty try: test_object.save() except NotImplementedError as", "that the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use", "' '_get_modified method as its getter-method' ) # - Assert that the setter", "a value of \"%s\" (%s) ' 'as an oid, but %s was raised", "% (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests the _del_created method of the", "Any 
needed from __future__ imports # # Create an \"__all__\" list to support", "# - Test all \"good\" values for created in GoodDateTimes: if type(created) ==", "its deleter-method' ) def testoid(self): # Tests the oid property of the BaseDataObject", "is_deleted else False actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\"", "'BaseDataObject.is_active is expected to use the ' '_del_is_active method as its deleter-method' )", "expected to use the ' '_get_is_dirty method as its getter-method' ) # -", "self.assertEquals(actual, expected, '_get_created was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.created.fget,", "raise TypeError ' 'or ValueError if passed an is_dirty value ' 'of \"%s\"", "through the property, but \"%s\" (%s) ' 'was returned instead' % ( is_deleted,", "passed a value of \"%s\" (%s) ' 'as an oid, but %s was", "# - setters oid = uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active", "% ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "% (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the _del_modified method of the", "is_active, is_deleted, is_dirty, is_new ) def _create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self)", "(%s) should return ' '\"%s\" (%s) through the property, but \"%s\" ' '(%s)", "property of the BaseDataObject class # - Assert that the getter is correct:", "LocalSuite = unittest.TestSuite() ####################################### # Import the module being tested # ####################################### import", "(%s) as valid is_deleted values, but it ' 'was allowed to be set'", "storage attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__) )", 
"self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in the ' 'underlying storage attribute,", ") ) def test_del_is_deleted(self): # Tests the _del_is_deleted method of the BaseDataObject class", "True if is_new else False actual = test_object.is_new self.assertEqual( actual, expected, 'Setting is_new", "test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to return \"%s\" (%s), but ' 'returned", "'_get_is_dirty was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "all \"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new", "hms_core.data_objects. \"\"\" ####################################### # Any needed from __future__ imports # # Create an", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_deleted", "' 'was not what was expected' ) except Exception as error: self.fail( 'BaseDataObject.save", "expected, 'Setting is_deleted to \"%s\" (%s) should return ' '\"%s\" (%s) through the", "is_new value ' 'of \"%s\" (%s), but %s was raised instead:\\n' ' %s'", "= BaseDataObjectDerived() expected = 'expected value' test_object._modified = expected actual = test_object.modified self.assertEquals(actual,", "for created in GoodDateTimes: if type(created) == datetime: expected = created elif type(created)", "(1,2), tuple(), True, False, object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456'", "== datetime: expected = created elif type(created) in (int, float): expected = datetime.fromtimestamp(created)", "getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the '", "expected to use the ' '_del_is_new method as its deleter-method' ) def testmodified(self):", "(test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests the _get_created 
method of the BaseDataObject", "self.assertEquals(actual, expected, '_get_is_dirty was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "the ' '_set_modified method as its setter-method' ) # - Assert that the", "as its setter-method' # ) # # - If property_name is not expected", "a known ' 'error-message, but the message returned ' 'was not what was", "# Third-party imports needed # ####################################### ####################################### # Local imports needed # #######################################", "expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected to return \"%s\" (%s),", "is_new else False actual = test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to \"%s\"", "all \"bad\" values for oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should", "correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to use the ' '_get_created method", "in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s)", "is_active value ' 'of \"%s\" (%s), but %s was raised instead:\\n' ' %s'", "test__init__(self): # Tests the __init__ method of the BaseDataObject class # - All", "BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created)", "expected, 'Setting created to \"%s\" (%s) should return ' '\"%s\" (%s) through the", "actual, expected, 'Setting is_active to \"%s\" (%s) should return ' '\"%s\" (%s) through", "Test all \"good\" values for oid in GoodOIDs: if type(oid) == UUID: expected", "is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the ' '_del_is_new", "- strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 
12:34:56', # - datetimes outside the", "# setter- and deleter-method calls are operating as # expected. # - deleters", "\"from module import member\" use # ####################################### __all__ = [ # Test-case classes", "[ # - actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # -", "needed from __future__ imports # # Create an \"__all__\" list to support #", "decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase(", "\"%s\" (%s) ' 'was returned instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual,", "- timestamp numbers 1234567890, 1234567890.123456, # - strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01", "valid is_dirty values, but it ' 'was allowed to be set' % (is_dirty,", "instance ' 'with an underlying None value' ) def test_get_oid(self): # Tests the", "BaseDataObjectDerived() # - Test all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected", "= test_object.is_active self.assertEqual( actual, expected, 'Setting is_active to \"%s\" (%s) should return '", "accept ' '\"%s\" (%s) as a valid oid, but it was ' 'allowed", "( is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "raised instead:\\n' ' %s' % ( created, type(created).__name__, error.__class__.__name__, error ) ) def", "an is_dirty value ' 'of \"%s\" (%s), but %s was raised instead:\\n' '", "# Tests the _del_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty", "'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_active value", "value' ) def 
test_get_is_active(self): # Tests the _get_is_active method of the BaseDataObject class", "error: if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_update, as required", "'_del_is_new method as its deleter-method' ) def testmodified(self): # Tests the modified property", "####################################### import os import sys import unittest from datetime import datetime from uuid", "modified to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "of \"%s\" (%s) ' 'as an oid, but %s was raised instead:\\n' '", "= 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None in the", "to force a call to _create: test_object._is_new = True for dirty in (True,", "# ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module test-cases to execute", "GoodOIDs = [ # - actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'),", "UUID value ' 'if it\\'s retrieved from an instance with an ' 'underlying", "# 'BaseDataObject.property_name is expected to use the ' # '_get_property_name method as its", "################################### def test__init__(self): # Tests the __init__ method of the BaseDataObject class #", "values, but it was allowed to ' 'be set' % (created, type(created).__name__) )", "the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the", "'underlying None value' ) def test_set_created(self): # Tests the _set_created method of the", "None) self.assertEquals(test_object._oid, None) # - setters oid = uuid4() created = GoodDateTimes[0] modified", "' %s' % ( modified, type(modified).__name__, error.__class__.__name__, error ) ) def 
test_set_oid(self): #", "# 'BaseDataObject.property_name is expected to use the ' # '_set_property_name method as its", "'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_active values, but", "'was returned instead' % ( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) )", "= BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created,", "# dependencies. Avoid if possible. # ####################################### ####################################### # Initialization that needs to", "self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the ' '_get_oid method as", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset,", "the _del_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified", "property, but \"%s\" (%s) ' 'was returned instead' % ( is_dirty, type(is_dirty).__name__, expected,", "BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self):", "should # # be changed to None, and the failure message adjusted #", "( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self): # Tests the _set_is_new", "should raise TypeError ' 'or ValueError if passed an is_dirty value ' 'of", ") # - Test all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted)", "test_object.modified self.assertEquals(actual, expected, 
'_get_modified was expected to return \"%s\" (%s), but ' 'returned", "expected to use the ' '_get_is_deleted method as its getter-method' ) # -", "- Assert that the setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name,", "expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead' % (", "should return ' '\"%s\" (%s) through the property, but \"%s\" (%s) ' 'was", "instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", ") ) # - Test all \"bad\" values for oid in BadOIDs: try:", "# # accordingly: # # - Assert that the deleter is correct: #", "(%s) as valid is_new values, but it ' 'was allowed to be set'", "BaseDataObject._get_created, 'BaseDataObject.created is expected to use the ' '_get_created method as its getter-method'", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new,", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected", "is_active = False is_deleted = True is_dirty = True is_new = False test_object", "use the ' '_get_is_deleted method as its getter-method' ) # - Assert that", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted", "= BaseDataObjectDerived() expected = 'expected value' test_object._created = expected actual = test_object.created self.assertEquals(actual,", "outside the UNIX epoch, just in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime(", "BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the ' '_set_is_new method as its setter-method'", "the property, but \"%s\" (%s) ' 'was returned instead' % ( modified, type(modified).__name__,", "def test_get_created(self): 
# Tests the _get_created method of the BaseDataObject class test_object =", "!= ( 'BaseDataObjectDerived has not implemented ' '_create, as required by BaseDataObject' ):", "is expected to use the ' '_del_is_deleted method as its deleter-method' ) def", "return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__,", "is prove that the various # setter- and deleter-method calls are operating as", "UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ]", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created =", "- Assert that the getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name,", "resolve circular # # dependencies. Avoid if possible. 
# ####################################### ####################################### # Initialization", "property, but \"%s\" (%s) ' 'was returned instead' % ( modified, type(modified).__name__, expected,", "self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the ' '_get_is_active method as", "BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod def", "the property, but \"%s\" (%s) ' 'was returned instead' % ( is_new, type(is_new).__name__,", "' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self): #", "should not accept \"%s\" ' '(%s) as modified values, but it was allowed", "the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the", "should raise TypeError ' 'or ValueError if passed an is_active value ' 'of", "type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self): # Tests the _set_is_new method of", "BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None", "class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified", "should not accept ' '\"%s\" (%s) as a valid oid, but it was", "import UUID, uuid4 ####################################### # Third-party imports needed # ####################################### ####################################### # Local", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None,", "BaseDataObjectDerived() expected = 'expected value' 
test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual, expected,", "to be set' % (is_new, type(is_new).__name__) ) except (TypeError, ValueError): pass except Exception", "test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to \"%s\" (%s) should return ' '\"%s\"", "instead' % ( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests the _del_is_active", "needed # ####################################### from idic.unit_testing import * ####################################### # Initialization needed before member", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should", ") ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID value", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified", "# # \"from module import member\" use # ####################################### __all__ = [ #", "type(created) in (int, float): expected = datetime.fromtimestamp(created) elif type(created) == str: expected =", "**criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod def from_data_dict(cls,", "a valid oid, but it was ' 'allowed to be set' % (oid,", "the UNIX epoch, just in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01", ") ) # - Test all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try:", "' 'or ValueError if passed an is_dirty value ' 'of \"%s\" (%s), but", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is", "'', (1,2), tuple() ] GoodDateTimes = 
[ # - actual datetime values datetime.now(),", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_new in", "type(is_new).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "type(actual).__name__, ) ) # - Test all \"bad\" values for modified in BadDateTimes:", "(%s) was ' 'found instead' % ( test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self):", "'BaseDataObject.is_new is expected to use the ' '_set_is_new method as its setter-method' )", "== UUID: expected = oid elif type(oid) == str: expected = UUID(oid) test_object._set_oid(oid)", "= True if is_deleted else False actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting", "== str: expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual(", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new,", "def test_del_is_dirty(self): # Tests the _del_is_dirty method of the BaseDataObject class test_object =", "== str: expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, expected, 'Setting", "class # # - Assert that the getter is correct: # self.assertEqual( #", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._modified = expected actual =", "self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_active values,", "attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ )", "value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None in the ' 'underlying", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value' 
test_object._del_modified() self.assertEquals( test_object._modified,", "is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "Assert that the setter is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to", "'1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage test-case and # # decorator-methods # #######################################", "(BaseDataObject._set_property_name) should # # be changed to None, and the failure message adjusted", "use the ' '_del_is_dirty method as its deleter-method' ) def testis_new(self): # Tests", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected", "Tests the _get_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "def test_set_created(self): # Tests the _set_created method of the BaseDataObject class test_object =", "= time.time() LocalSuite.run(results) results.runTime = time.time() - testStartTime PrintTestResults(results) if not results.errors and", "values, but it was allowed to ' 'be set' % (modified, type(modified).__name__) )", "####################################### from idic.unit_testing import * ####################################### # Initialization needed before member # #", "that the setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name", "'__main__': import time results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time()", "needed before member # # definition can take place # ####################################### ####################################### #", "accept ' '\"%s\" (%s) as valid is_deleted values, but it ' 'was allowed", "_get_is_active 
method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "- Test all \"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_new in GoodBooleanOrIntEquivalents:", "to ' 'be set' % (modified, type(modified).__name__) ) except (TypeError, ValueError): pass except", "is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the ' '_get_oid", "####################################### # Third-party imports needed # ####################################### ####################################### # Local imports needed #", "####################################### ####################################### # Code to execute if file is called # # or", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for oid in", "value of \"%s\" (%s) ' 'as an oid, but %s was raised instead:\\n'", "self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters oid = uuid4() created", "'BaseDataObject.created is expected to use the ' '_set_created method as its setter-method' )", "the is_deleted property of the BaseDataObject class # - Assert that the getter", "was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead' %", "test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a ' 'datetime value if", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.created.fdel,", "its deleter-method' ) def testmodified(self): # Tests the modified property of the BaseDataObject", "####################################### if __name__ == '__main__': import time results = unittest.TestResult() testStartTime = time.time()", "modified, 
BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected, 'Setting modified to", "BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the ' '_get_oid method as its", "BaseDataObjectDerived() expected = 'expected value' test_object._modified = expected actual = test_object.modified self.assertEquals(actual, expected,", "TypeError ' 'or ValueError if passed an is_deleted value ' 'of \"%s\" (%s),", "module import member\" use # ####################################### __all__ = [ # Test-case classes #", "rights reserved' __status__ = 'Development' ####################################### # Standard library imports needed # #######################################", "' 'was allowed to be set' % (is_new, type(is_new).__name__) ) except (TypeError, ValueError):", "BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the ' '_set_is_active method as its setter-method'", "test_set_is_deleted(self): # Tests the _set_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived()", "= expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to return \"%s\"", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self): # Tests the", "reserved' __status__ = 'Development' ####################################### # Standard library imports needed # ####################################### import", "( 'BaseDataObjectDerived has not implemented ' '_create, as required by BaseDataObject' ): self.fail(", "or run directly. 
# ####################################### if __name__ == '__main__': import time results =", "# - Test all \"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected =", "\"good\" values for created in GoodDateTimes: if type(created) == datetime: expected = created", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for oid in", "BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_set_is_deleted method as its", ") ) def test_set_is_dirty(self): # Tests the _set_is_dirty method of the BaseDataObject class", "is_new property of the BaseDataObject class # - Assert that the getter is", "BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the ' '_set_is_new method as its", "unit-tests for the module at hms_core.data_objects. \"\"\" ####################################### # Any needed from __future__", "take place # ####################################### ####################################### # Module-level Constants # ####################################### LocalSuite = unittest.TestSuite()", ") def test_del_is_deleted(self): # Tests the _del_is_deleted method of the BaseDataObject class test_object", "instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", "'_get_modified method as its getter-method' ) # - Assert that the setter is", "# - Assert that the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is", "= BaseDataObjectDerived() test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave", "type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_new", ") ) # - Test all \"bad\" values for is_active in 
BadBooleanOrIntEquivalents: try:", "= True is_dirty = True is_new = False test_object = BaseDataObjectDerived( oid, created,", "Assert that the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to", "expected. # - deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted,", "__copyright__ = 'Copyright 2018, all rights reserved' __status__ = 'Development' ####################################### # Standard", "%s was raised instead:\\n' ' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error )", "'BaseDataObject.created is expected to use the ' '_get_created method as its getter-method' )", "'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None in the '", "type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests the _del_is_dirty method of the BaseDataObject", "'of \"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_deleted,", "'was not what was expected' ) except Exception as error: self.fail( 'BaseDataObject.save did", "# - Test all \"bad\" values for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail(", "force a call to _create: test_object._is_new = True for dirty in (True, False,", "error.__class__.__name__, error ) ) def test_set_is_deleted(self): # Tests the _set_is_deleted method of the", "values for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept", "Tests the created property of the BaseDataObject class # - Assert that the", "method as its setter-method' # ) # # - If property_name is not", "the _get_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "\"%s\" (%s) was ' 'found instead' % ( 
test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def", "else False actual = test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to \"%s\" (%s)", "is_active property of the BaseDataObject class # - Assert that the getter is", "self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\" (%s) should return ' '\"%s\" (%s)", "'BaseDataObject.oid is expected to use the ' '_del_oid method as its deleter-method' )", "return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod", "the _get_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "expected to use the ' '_del_modified method as its deleter-method' ) def testoid(self):", "test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected, 'Setting modified to \"%s\" (%s) should", "BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use the ' '_set_oid method as its", "message adjusted # # accordingly: # # - Assert that the deleter is", "correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the ' '_del_modified method", "setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the '", "import time results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time() -", "% (created, type(created).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "= True if is_new else False actual = test_object.is_new self.assertEqual( actual, expected, 'Setting", "expected, '_get_is_deleted was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", "the __init__ method of the BaseDataObject class # - All we need to", 
"'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_deleted value", "use the ' '_get_modified method as its getter-method' ) # - Assert that", "the ' 'expected error while being tested' ) # - Set things up", "instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests the _del_is_new", "underlying None value' ) def test_get_oid(self): # Tests the _get_oid method of the", "the _set_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "method as its deleter-method' ) def testis_active(self): # Tests the is_active property of", "type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID", "for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept '", "####################################### # Child-module test-cases to execute # ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests)", "False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters oid", "TypeError ' 'or ValueError if passed a modified value of ' '\"%s\" (%s),", "BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_del_property_name", "test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to return \"%s\" (%s), but ' 'returned", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel,", "Test all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects", ") ) def 
test_set_is_deleted(self): # Tests the _set_is_deleted method of the BaseDataObject class", "Tests the _get_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "'BaseDataObject._del_created should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._created, type(test_object._created).__name__", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty,", "####################################### ####################################### # Initialization that needs to # # happen after member definition.", "= test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\" (%s) should return '", "try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid", "'_set_created method as its setter-method' ) # - Assert that the deleter is", "the ' '_del_created method as its deleter-method' ) def testis_active(self): # Tests the", "test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should", "deleter-method' ) def testmodified(self): # Tests the modified property of the BaseDataObject class", "test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should", "' 'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % (", "getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the '", "up to force a call to _update: test_object._is_new = False for dirty in", "' '_set_is_new method as its setter-method' ) # - Assert that the deleter", "attribute, but 
\"%s\" (%s) was ' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ )", "test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests the _del_is_active method of the", "is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests the _del_created method of the BaseDataObject", "method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\"", "objects should not accept \"%s\" ' '(%s) as modified values, but it was", "(%s) ' 'as an oid, but %s was raised instead:\\n' ' %s' %", "# Test-case classes # Child test-modules ] ####################################### # Module metadata/dunder-names # #######################################", "None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a ' 'datetime value if it\\'s retrieved", "created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected, 'Setting created to", "case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01", "Third-party imports needed # ####################################### ####################################### # Local imports needed # ####################################### from", "correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_get_is_dirty method", "None value' ) def test_get_oid(self): # Tests the _get_oid method of the BaseDataObject", "Test all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if", "= [ # - actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), 
UUID('00000000-0000-0000-0000-000000000000'), #", "the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the", "but \"%s\" (%s) ' 'was returned instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__,", "to use the ' '_del_is_deleted method as its deleter-method' ) def testis_dirty(self): #", "(%s) was ' 'found instead' % ( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self):", "it ' 'was allowed to be set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError,", ") def test_get_created(self): # Tests the _get_created method of the BaseDataObject class test_object", "instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", "returned ' 'was not what was expected' ) except Exception as error: self.fail(", "True if is_active else False actual = test_object.is_active self.assertEqual( actual, expected, 'Setting is_active", "12:34:56', # - datetimes outside the UNIX epoch, just in case datetime.strptime( '2001-01-01", "was allowed to ' 'be set' % (created, type(created).__name__) ) except (TypeError, ValueError):", "= BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave", "_create: test_object._is_new = True for dirty in (True, False, None): test_object._is_dirty = dirty", "test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected, 'Setting created to \"%s\" (%s) should", "\"%s\" (%s), but ' 'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual,", "= datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected, 'Setting", "the deleter is 
correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the", "self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the ' '_get_modified method as", "sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods #", "oid, created, modified, is_active, is_deleted, is_dirty, is_new ) def _create(self): return BaseDataObject._create(self) def", "as its getter-method' # ) # # - If property_name is not expected", "be set' % (oid, type(oid).__name__) ) except (TypeError, ValueError): pass except Exception as", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._created = expected actual", "* ####################################### # Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True, False,", ") except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects should", "# Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True, False, 1, 0", "type(actual).__name__, ) ) # - Test all \"bad\" values for is_new in BadBooleanOrIntEquivalents:", "# Tests the save method of the BaseDataObject class test_object = BaseDataObjectDerived() #", "objects should not accept ' '\"%s\" (%s) as a valid oid, but it", "'', '1911-01-01 12:34:56.123456' ] GoodOIDs = [ # - actual UUID values uuid4(),", "_create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria)", "test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected to return \"%s\" (%s), but ' 'returned", 
"datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456, # - strings '2001-01-01 12:34:56',", "the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the", "test_del_created(self): # Tests the _del_created method of the BaseDataObject class test_object = BaseDataObjectDerived()", "use the ' '_del_created method as its deleter-method' ) def testis_active(self): # Tests", "- All we need to do here is prove that the various #", "self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests the _del_created method of the BaseDataObject class", "= test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\" (%s) should return '", "oid, but it was ' 'allowed to be set' % (oid, type(oid).__name__) )", "value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None in the ' 'underlying", "the _set_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "= uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False is_deleted =", "should raise TypeError ' 'or ValueError if passed a value of \"%s\" (%s)", "'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_deleted values, but", "if __name__ == '__main__': import time results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results)", "data_objects from hms_core.data_objects import * ####################################### # Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents", "'_set_is_active method as its setter-method' ) # - Assert that the deleter is", "- actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456,", "datetime: 
expected = created elif type(created) in (int, float): expected = datetime.fromtimestamp(created) elif", "return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty = expected actual", "the ' '_set_is_dirty method as its setter-method' ) # - Assert that the", "Tests the _del_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active =", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected", "import hms_core.data_objects as data_objects from hms_core.data_objects import * ####################################### # Constants for test-methods", "to use the ' '_del_created method as its deleter-method' ) def testis_active(self): #", "use the ' # '_get_property_name method as its getter-method' # ) # #", "# Tests the _set_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() #", "be set' % (is_new, type(is_new).__name__) ) except (TypeError, ValueError): pass except Exception as", "file is called # # or run directly. 
# ####################################### if __name__ ==", "item here (BaseDataObject._set_property_name) should # # be changed to None, and the failure", "not expected to be publicly deletable, # # the second item here (BaseDataObject._del_property_name)", "not accept ' '\"%s\" (%s) as valid is_new values, but it ' 'was", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals(", "self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the ' '_set_is_new method as", "'3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [", "to use the ' # '_del_property_name method as its deleter-method' # ) LocalSuite.addTests(", "use the ' '_get_is_new method as its getter-method' ) # - Assert that", "' 'if it\\'s retrieved from an instance with an ' 'underlying None value'", "self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use the ' '_set_oid method as", "the property, but \"%s\" (%s) ' 'was returned instead' % ( created, type(created).__name__,", "'2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string", "self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests the", "####################################### # Initialization that needs to # # happen after member definition. 
#", "but \"%s\" (%s) was ' 'found instead' % ( test_object._created, type(test_object._created).__name__ ) )", "its setter-method' # ) # # - If property_name is not expected to", "to use the ' '_set_oid method as its setter-method' ) # - Assert", "def testcreated(self): # Tests the created property of the BaseDataObject class # -", "Imports to resolve circular # # dependencies. Avoid if possible. # ####################################### #######################################", "# or run directly. # ####################################### if __name__ == '__main__': import time results", ") def test_get_is_dirty(self): # Tests the _get_is_dirty method of the BaseDataObject class test_object", "'_get_oid method as its getter-method' ) # - Assert that the setter is", "should not accept ' '\"%s\" (%s) as valid is_new values, but it '", "'2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', # - datetimes outside the UNIX epoch,", "calls are operating as # expected. # - deleters first test_object = BaseDataObjectDerived()", "values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254',", "values for modified in GoodDateTimes: if type(modified) == datetime: expected = modified elif", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected", "None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None)", "self.assertEqual( actual, expected, 'Setting is_new to \"%s\" (%s) 
should return ' '\"%s\" (%s)", "Tests the _set_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "def testis_active(self): # Tests the is_active property of the BaseDataObject class # -", "self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_dirty", "Tests the _del_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified =", "BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_get_is_dirty method as its getter-method'", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID,", "BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_get_property_name", "def _create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self,", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty = expected actual =", "# Module metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__ = 'Copyright 2018, all", "Tests the _del_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted =", "as its deleter-method' ) def testmodified(self): # Tests the modified property of the", "correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_set_is_dirty method", "= BaseDataObjectDerived() 
test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave", "instead:\\n' ' %s' % ( created, type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self):", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_active in GoodBooleanOrIntEquivalents:", "test_object._modified, None, 'BaseDataObject._del_modified should leave None in the ' 'underlying storage attribute, but", "invalid values 'true', '', '1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage test-case and #", "created, modified, is_active, is_deleted, is_dirty, is_new ) def _create(self): return BaseDataObject._create(self) def _update(self):", "testoid(self): # Tests the oid property of the BaseDataObject class # - Assert", "while being tested' ) # - Set things up to force a call", "the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._created = expected", "tested' ) # - Set things up to force a call to _update:", "test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_new", "try: test_object.save() except NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived has not", "GoodOIDs: if type(oid) == UUID: expected = oid elif type(oid) == str: expected", "test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests the _del_is_dirty method of the", "def test_set_oid(self): # Tests the _set_oid method of the BaseDataObject class test_object =", "modified, is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, 
created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted,", "to use the ' '_set_created method as its setter-method' ) # - Assert", "possible. # ####################################### ####################################### # Initialization that needs to # # happen after", "error: self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an", "actual, expected, 'Setting is_dirty to \"%s\" (%s) should return ' '\"%s\" (%s) through", "not implemented ' '_update, as required by BaseDataObject' ): self.fail( 'Calling _create should", "and deleter-method calls are operating as # expected. # - deleters first test_object", "test_object._is_dirty = dirty try: test_object.save() except NotImplementedError as error: if str(error) != (", "test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as modified values,", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty", "= dirty try: test_object.save() except NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived", ") # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty", "to use the ' '_get_oid method as its getter-method' ) # - Assert", "all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should", "first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new,", "is called # # or run directly. 
# ####################################### if __name__ == '__main__':", "objects should raise TypeError ' 'or ValueError if passed a value of \"%s\"", "method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty()", "== datetime: expected = modified elif type(modified) in (int, float): expected = datetime.fromtimestamp(modified)", "] ####################################### # Code-coverage test-case and # # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest):", "Test all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to", "'expected value' test_object._modified = expected actual = test_object.modified self.assertEquals(actual, expected, '_get_modified was expected", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget,", "error ) ) def test_set_modified(self): # Tests the _set_modified method of the BaseDataObject", "returned instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", "actual, type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a", "BaseDataObject class # # - Assert that the getter is correct: # self.assertEqual(", "BaseDataObject._data_time_string ), ] BadDateTimes = [ # - invalid types (1,2), tuple(), True,", "but %s was raised instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error", "all \"good\" values for modified in GoodDateTimes: if type(modified) == datetime: expected =", "# 
####################################### ####################################### # Initialization that needs to # # happen after member", "member # # definition can take place # ####################################### ####################################### # Module-level Constants", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._created = expected actual = test_object.created", "\"%s\" (%s) was ' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) ) def", "type(oid) == UUID: expected = oid elif type(oid) == str: expected = UUID(oid)", "'_set_oid method as its setter-method' ) # - Assert that the deleter is", "'BaseDataObject.property_name is expected to use the ' # '_del_property_name method as its deleter-method'", "is_new = False test_object = BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty, is_new", "(%s) ' 'was returned instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__,", "created in GoodDateTimes: if type(created) == datetime: expected = created elif type(created) in", "as valid is_dirty values, but it ' 'was allowed to be set' %", "_set_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "' '_set_is_deleted method as its setter-method' ) # - Assert that the deleter", "method as its deleter-method' ) def testoid(self): # Tests the oid property of", "the BaseDataObject class # # - Assert that the getter is correct: #", "= BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified,", "raised instead:\\n' ' %s' % ( oid, type(oid).__name__, error.__class__.__name__, error ) ) def", "sys import unittest from datetime import 
datetime from uuid import UUID, uuid4 #######################################", "allowed to ' 'be set' % (modified, type(modified).__name__) ) except (TypeError, ValueError): pass", "of class methods # ################################### def test__init__(self): # Tests the __init__ method of", "_get_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the ' '_get_modified method as its", "test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected to return \"%s\" (%s), but ' 'returned", "BadOIDs = [ # - invalid types (1,2), tuple(), True, False, object(), #", "\"%s\" (%s) ' 'was returned instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual,", "False, 'BaseDataObject._del_is_deleted should leave None in the ' 'underlying storage attribute, but \"%s\"", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._created,", "object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage", ") def test_set_is_active(self): # Tests the _set_is_active method of the BaseDataObject class test_object", "\"%s\" (%s) ' 'was returned instead' % ( is_new, type(is_new).__name__, expected, type(expected).__name__, actual,", "and # # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule =", "to resolve circular # # dependencies. Avoid if possible. 
# ####################################### ####################################### #", "\"%s\" (%s) should return ' '\"%s\" (%s) through the property, but \"%s\" '", "' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests", "(%s), but %s was raised instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__,", "' 'be set' % (modified, type(modified).__name__) ) except (TypeError, ValueError): pass except Exception", "'_del_is_deleted method as its deleter-method' ) def testis_dirty(self): # Tests the is_dirty property", "####################################### # Imports to resolve circular # # dependencies. Avoid if possible. #", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted = expected actual =", "= 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None in the", "# '_set_property_name method as its setter-method' # ) # # - If property_name", "the is_active property of the BaseDataObject class # - Assert that the getter", "# Tests the _del_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new", "test_set_is_dirty(self): # Tests the _set_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived()", "the ' '_set_is_new method as its setter-method' ) # - Assert that the", "raise TypeError ' 'or ValueError if passed an is_active value ' 'of \"%s\"", "Assert that the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to", "the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_active = expected", "class properties # ################################### def testcreated(self): # Tests the created property of the", "'BaseDataObject._get_created should 
return a ' 'datetime value if it\\'s retrieved from an instance", "is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self): # Tests the _set_is_deleted method", "pass @classmethod def get(cls, *oids, **criteria): pass @classmethod def sort(cls, objects, sort_by): pass", "' '_get_created method as its getter-method' ) # - Assert that the setter", "elif type(oid) == str: expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual,", "value ' 'if it\\'s retrieved from an instance with an ' 'underlying None", "the module # ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None,", "is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted else False actual =", "error while being tested' ) # - Set things up to force a", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active", "actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to return \"%s\" (%s), but", "Import the module being tested # ####################################### import hms_core.data_objects as data_objects from hms_core.data_objects", "Assert that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to", "\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_new, type(is_new).__name__,", "#!/usr/bin/env python \"\"\" Defines unit-tests for the module at hms_core.data_objects. 
\"\"\" ####################################### #", "% (is_new, type(is_new).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "to be set' % (oid, type(oid).__name__) ) except (TypeError, ValueError): pass except Exception", "has not implemented ' '_update, as required by BaseDataObject' ): self.fail( 'Calling _create", "BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_set_property_name method as", "BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple() ] GoodDateTimes = [ # -", "test_object._set_is_dirty(is_dirty) expected = True if is_dirty else False actual = test_object.is_dirty self.assertEqual( actual,", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected", "'BaseDataObject.is_new is expected to use the ' '_del_is_new method as its deleter-method' )", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests the", "% ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) ) def test_set_is_dirty(self): # Tests the", "in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s)", "False, object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs =", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new,", "= BaseDataObjectDerived() expected = 'expected value' test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual,", "BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the ' '_del_is_active 
method as its", "- Assert that the deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name,", "type(modified) in (int, float): expected = datetime.fromtimestamp(modified) elif type(modified) == str: expected =", "raised instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_new = expected actual =", "type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self): # Tests the _set_is_active method of", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted,", "'BaseDataObjectDerived has not implemented ' '_create, as required by BaseDataObject' ): self.fail( 'Calling", "values, but it ' 'was allowed to be set' % (is_new, type(is_new).__name__) )", "method as its setter-method' ) # - Assert that the deleter is correct:", "setter is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use the '", "not accept ' '\"%s\" (%s) as valid is_dirty values, but it ' 'was", "instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()),", "test_object.modified self.assertEqual( actual, expected, 'Setting modified to \"%s\" (%s) should return ' '\"%s\"", "2018, all rights reserved' __status__ = 'Development' ####################################### # Standard library imports needed", "= test_object.modified self.assertEquals(actual, expected, '_get_modified was expected to return \"%s\" (%s), but '", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected", "# BaseDataObject.property_name.fget, # 
BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use the ' #", "= False test_object = BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty, is_new )", "self.assertEqual( actual, expected, 'Setting created to \"%s\" (%s) should return ' '\"%s\" (%s)", "if is_dirty else False actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted,", "but %s was raised instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error", "Assert that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to", "UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, expected, 'Setting oid to \"%s\" (%s)", "# Child test-modules ] ####################################### # Module metadata/dunder-names # ####################################### __author__ = '<NAME>'", "strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ # -", "= datetime.fromtimestamp(modified) elif type(modified) == str: expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified)", "getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected", "as its getter-method' ) # - Assert that the setter is correct: self.assertEqual(", "actual, expected, 'Setting created to \"%s\" (%s) should return ' '\"%s\" (%s) through", "GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty else False 
actual = test_object.is_dirty self.assertEqual(", ") ) def test_get_is_deleted(self): # Tests the _get_is_deleted method of the BaseDataObject class", "instead:\\n' ' %s' % ( oid, type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self):", "# Tests of class methods # ################################### def test__init__(self): # Tests the __init__", "an oid, but %s was raised instead:\\n' ' %s' % ( oid, type(oid).__name__,", "# - Test all \"good\" values for modified in GoodDateTimes: if type(modified) ==", "all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty", "the ' # '_del_property_name method as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject", "type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self): # Tests the _set_modified method of", "_testNamespace = 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### #", "( created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "####################################### GoodBooleanOrIntEquivalents = [ True, False, 1, 0 ] BadBooleanOrIntEquivalents = [ 'true',", "self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_set_is_dirty method as", "class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active", "False actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\" (%s) should", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 
'BaseDataObject.is_deleted is expected", "as its deleter-method' ) def testis_active(self): # Tests the is_active property of the", "try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid", "type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests the _get_is_deleted method of the BaseDataObject", "set' % (modified, type(modified).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "Assert that the getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, #", "retrieved from an instance ' 'with an underlying None value' ) def test_get_is_active(self):", "# Local imports needed # ####################################### from idic.unit_testing import * ####################################### # Initialization", "UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs =", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to", "test_get_modified(self): # Tests the _get_modified method of the BaseDataObject class test_object = BaseDataObjectDerived()", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for oid in GoodOIDs:", "Assert that the setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, #", "self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters", "in (True, False, None): test_object._is_dirty = dirty try: 
test_object.save() except NotImplementedError as error:", "] BadDateTimes = [ # - invalid types (1,2), tuple(), True, False, object(),", "created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept \"%s\" '", "expected to use the ' '_set_created method as its setter-method' ) # -", "property of the BaseDataObject class # # - Assert that the getter is", "test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests the _del_is_new method of the", "test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None in the ' 'underlying storage", "to be set' % (is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass except Exception", "the setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is", "datetime, 'BaseDataObject._get_modified should return a ' 'datetime value if it\\'s retrieved from an", "####################################### import hms_core.data_objects as data_objects from hms_core.data_objects import * ####################################### # Constants for", "' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests", "to use the ' '_del_is_new method as its deleter-method' ) def testmodified(self): #", "be set' % (is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass except Exception as", "the getter is correct: self.assertEqual( BaseDataObject.modified.fget, BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the", "of the BaseDataObject class # - Assert that the getter is correct: self.assertEqual(", "for dirty in (True, False, None): test_object._is_dirty = dirty try: test_object.save() except NotImplementedError", "= GoodDateTimes[1] is_active = False is_deleted = True is_dirty 
= True is_new =", "as valid is_deleted values, but it ' 'was allowed to be set' %", "'_get_is_deleted method as its getter-method' ) # - Assert that the setter is", "storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._modified, type(test_object._modified).__name__", "objects should raise TypeError ' 'or ValueError if passed a created value of", "pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods # ###################################", "# - Test all \"bad\" values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail(", ") ) def testsave(self): # Tests the save method of the BaseDataObject class", "actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected to return \"%s\" (%s), but", "should not accept \"%s\" ' '(%s) as created values, but it was allowed", "BaseDataObject._del_created, 'BaseDataObject.created is expected to use the ' '_del_created method as its deleter-method'", "adjusted # # accordingly: # # - Assert that the deleter is correct:", "Tests the is_deleted property of the BaseDataObject class # - Assert that the", "if it\\'s retrieved from an instance ' 'with an underlying None value' )", "BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the ' '_get_is_active method as its", ") def testis_new(self): # Tests the is_new property of the BaseDataObject class #", "' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self): #", "is not expected to be publicly settable, # # the second item here", "'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified", "test_object.is_new self.assertEquals(actual, expected, '_get_is_new was 
expected to return \"%s\" (%s), but ' 'returned", "GoodDateTimes: if type(modified) == datetime: expected = modified elif type(modified) in (int, float):", "type(actual).__name__, ) ) # - Test all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents:", "datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected, 'Setting modified", "hms_core.data_objects as data_objects from hms_core.data_objects import * ####################################### # Constants for test-methods #", "test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in the ' 'underlying storage attribute, but", "' '(%s) as modified values, but it was allowed to ' 'be set'", "# # the second item here (BaseDataObject._del_property_name) should # # be changed to", "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self): #", "'BaseDataObject.is_deleted is expected to use the ' '_del_is_deleted method as its deleter-method' )", "set' % (is_new, type(is_new).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "= 'expected value' test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self): # Tests the _get_is_new", "**criteria): pass @classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ###################################", "expected = 'expected value' test_object._oid = expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid", "self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a created", "'', '1911-01-01 12:34:56.123456' ] 
####################################### # Code-coverage test-case and # # decorator-methods #", "(%s) was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests", "Tests the _set_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "= BaseDataObjectDerived() # - Test all \"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new)", "call to _create: test_object._is_new = True for dirty in (True, False, None): test_object._is_dirty", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created,", "\"good\" values for oid in GoodOIDs: if type(oid) == UUID: expected = oid", "BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_get_is_dirty method as its", "class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__(", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new,", "in case datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime(", "but \"%s\" (%s) ' 'was returned instead' % ( modified, type(modified).__name__, expected, type(expected).__name__,", "'_del_modified method as its deleter-method' ) def testoid(self): # Tests the oid property", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._oid = expected actual =", "self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) 
self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def", "value' test_object._oid = expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to", "except Exception as error: self.fail( 'BaseDataObject.save did not raise the ' 'expected error", "error: self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a", "deleter-method' ) def testis_deleted(self): # Tests the is_deleted property of the BaseDataObject class", "_get_created method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "but \"%s\" (%s) was ' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) )", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False,", "def test_get_is_new(self): # Tests the _get_is_new method of the BaseDataObject class test_object =", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is", "the _get_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "the _get_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new else False", "expected = 'expected value' test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new", "actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\" (%s) should return", "the ' '_del_is_new method as its deleter-method' ) def testmodified(self): # Tests the", "# Import the module being tested # ####################################### import 
hms_core.data_objects as data_objects from", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created", "use the ' '_del_oid method as its deleter-method' ) # def testproperty_name(self): #", "if type(created) == datetime: expected = created elif type(created) in (int, float): expected", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created() self.assertEquals(", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted", "the second item here (BaseDataObject._del_property_name) should # # be changed to None, and", "(1,2), tuple() ] GoodDateTimes = [ # - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890),", "to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead' % ( expected,", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals(", "if passed an is_active value ' 'of \"%s\" (%s), but %s was raised", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for modified in GoodDateTimes:", "a call to _create: test_object._is_new = True for dirty in (True, False, None):", "test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to return", "####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ):", "actual = test_object.modified self.assertEqual( actual, expected, 'Setting modified to \"%s\" (%s) should return", "oid to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "test_get_is_deleted(self): # Tests the _get_is_deleted method of the BaseDataObject class 
test_object = BaseDataObjectDerived()", "'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a created value", "False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters oid = uuid4()", "values, but it ' 'was allowed to be set' % (is_dirty, type(is_dirty).__name__) )", "is correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to", "% ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) ) def test_set_is_new(self): # Tests the", "test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a ' 'datetime value if", "to use the ' '_del_is_dirty method as its deleter-method' ) def testis_new(self): #", "def test_del_modified(self): # Tests the _del_modified method of the BaseDataObject class test_object =", "__future__ imports # # Create an \"__all__\" list to support # # \"from", "while being tested' ) ################################### # Tests of class properties # ################################### def", "that the getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name", "is expected to use the ' '_del_is_active method as its deleter-method' ) def", "error.__class__.__name__, error ) ) def test_set_oid(self): # Tests the _set_oid method of the", "BaseDataObject class test_object = BaseDataObjectDerived() # - Set things up to force a", "is expected to use the ' '_del_created method as its deleter-method' ) def", "is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_get_is_dirty", "method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value' 
test_object._del_modified()", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_active = expected actual", "the _set_created method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "tested' ) ################################### # Tests of class properties # ################################### def testcreated(self): #", "correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected to use the ' '_get_is_active method", "do here is prove that the various # setter- and deleter-method calls are", "is_dirty to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "@classmethod def from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids, **criteria): pass @classmethod def", "\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__,", "ValueError if passed a created value of ' '\"%s\" (%s), but %s was", "12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [ # - invalid types (1,2), tuple(),", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals(", "expected = datetime.fromtimestamp(created) elif type(created) == str: expected = datetime.strptime( created, BaseDataObject._data_time_string )", "# - Test all \"bad\" values for oid in BadOIDs: try: test_object._set_oid(oid) self.fail(", "# - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is", "def testis_new(self): # Tests the is_new property of the BaseDataObject class # -", "a ' 'datetime value if it\\'s retrieved from an instance ' 'with an", "instance ' 'with an underlying None value' ) def test_get_is_active(self): # Tests the", "self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 
'BaseDataObject.created is expected to use the ' '_get_created method as", "% ( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "actual = test_object.created self.assertEquals(actual, expected, '_get_created was expected to return \"%s\" (%s), but", "error ) ) def test_set_oid(self): # Tests the _set_oid method of the BaseDataObject", "self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_del_is_deleted method as", "'<NAME>' __copyright__ = 'Copyright 2018, all rights reserved' __status__ = 'Development' ####################################### #", "test_object._created, None, 'BaseDataObject._del_created should leave None in the ' 'underlying storage attribute, but", "is expected to use the ' '_set_created method as its setter-method' ) #", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new", "# - Set things up to force a call to _update: test_object._is_new =", "child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular # # dependencies. 
Avoid", "def test_get_is_active(self): # Tests the _get_is_active method of the BaseDataObject class test_object =", "'BaseDataObject objects should not accept \"%s\" ' '(%s) as created values, but it", "error ) ) def test_set_is_deleted(self): # Tests the _set_is_deleted method of the BaseDataObject", "BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as", "an instance ' 'with an underlying None value' ) def test_get_is_active(self): # Tests", "elif type(modified) == str: expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual =", "is expected to use the ' '_get_oid method as its getter-method' ) #", "the BaseDataObject class # - All we need to do here is prove", "ValueError if passed an is_dirty value ' 'of \"%s\" (%s), but %s was", "), ] BadDateTimes = [ # - invalid types (1,2), tuple(), True, False,", "type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self): # Tests the _set_is_deleted method of", "( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "not raise the ' 'expected error while being tested' ) ################################### # Tests", "def from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids, **criteria): pass @classmethod def sort(cls,", "its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### # Child-module test-cases", "test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests the _del_is_deleted method of the", "\"%s\" (%s) was ' 'found instead' % ( test_object._created, type(test_object._created).__name__ ) ) def", "modified) def test_del_created(self): # Tests the _del_created method of the BaseDataObject class test_object", 
"type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_deleted", "# - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is", "'found instead' % ( test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self): # Tests the", "Tests the _del_created method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created =", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel,", "test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in the ' 'underlying storage", ") ) ####################################### # Test-cases in the module # ####################################### class BaseDataObjectDerived(BaseDataObject): def", "from idic.unit_testing import * ####################################### # Initialization needed before member # # definition", "the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use the", ") ) # - Test all \"bad\" values for is_new in BadBooleanOrIntEquivalents: try:", "in GoodOIDs: if type(oid) == UUID: expected = oid elif type(oid) == str:", "'BaseDataObject.is_active is expected to use the ' '_get_is_active method as its getter-method' )", "to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict): pass", "_del_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected value'", "BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_set_property_name", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 
'expected value' test_object._is_active =", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget,", "' 'was allowed to be set' % (is_active, type(is_active).__name__) ) except (TypeError, ValueError):", "'\"%s\" (%s) through the property, but \"%s\" ' '(%s) was returned instead.' %", "an underlying None value' ) def test_get_is_active(self): # Tests the _get_is_active method of", "\"%s\" ' '(%s) as created values, but it was allowed to ' 'be", "' '(%s) as created values, but it was allowed to ' 'be set'", "expected = oid elif type(oid) == str: expected = UUID(oid) test_object._set_oid(oid) actual =", "the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the", "time.time() - testStartTime PrintTestResults(results) if not results.errors and not results.failures: SaveTestReport(results, 'hms_core.data_objects', 'hms_core.data_objects.test-results')", "raised instead:\\n' ' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_dirty,", "str(error) != ( 'BaseDataObjectDerived has not implemented ' '_create, as required by BaseDataObject'", "but it ' 'was allowed to be set' % (is_deleted, type(is_deleted).__name__) ) except", "to use the ' '_get_created method as its getter-method' ) # - Assert", "' '\"%s\" (%s) as valid is_dirty values, but it ' 'was allowed to", "values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted else False", "'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the _del_modified method", "'if it\\'s retrieved from an instance with an ' 'underlying None value' )", "testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time() - 
testStartTime PrintTestResults(results) if not results.errors", "Test all \"bad\" values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects", "but \"%s\" (%s) was ' 'found instead' % ( test_object._modified, type(test_object._modified).__name__ ) )", "instead' % ( created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # -", "(%s), but %s was raised instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__,", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self):", "( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests the _del_is_active method of", "correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the ' '_del_oid method", "is expected to use the ' '_get_is_deleted method as its getter-method' ) #", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active is expected", "returned instead' % ( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", "that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use", "of the BaseDataObject class # # - Assert that the getter is correct:", "test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None in the ' 'underlying storage", "BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_set_is_deleted method as its setter-method'", "expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to return \"%s\" (%s),", "'BaseDataObject objects should 
not accept ' '\"%s\" (%s) as valid is_dirty values, but", "# - Set things up to force a call to _create: test_object._is_new =", "'BaseDataObject.is_dirty is expected to use the ' '_set_is_dirty method as its setter-method' )", "'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ # - invalid types (1,2), tuple(), True,", "type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self): # Tests the _get_is_new method of", "hms_core.data_objects import * ####################################### # Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents = [", "'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a modified value", "# - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._oid = expected actual = test_object.oid", "to use the ' '_get_modified method as its getter-method' ) # - Assert", ") def test_get_oid(self): # Tests the _get_oid method of the BaseDataObject class test_object", "] ####################################### # Module metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__ = 'Copyright", "test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None in", "- deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty,", "[ # - invalid types (1,2), tuple(), True, False, object(), # - invalid", "correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the ' '_set_is_new method", "objects 
should raise TypeError ' 'or ValueError if passed an is_dirty value '", "type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\" values for created", "is_active, is_deleted, is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted)", "the _get_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "# # be changed to None, and the failure message adjusted # #", "should raise TypeError ' 'or ValueError if passed an is_deleted value ' 'of", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel,", "None, 'BaseDataObject._del_oid should leave None in the ' 'underlying storage attribute, but \"%s\"", "attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def", "# # or run directly. 
# ####################################### if __name__ == '__main__': import time", "self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_del_is_dirty method as", "is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted, is_dirty, is_new", "test_del_is_active(self): # Tests the _del_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived()", "all \"bad\" values for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should", "the ' '_del_is_active method as its deleter-method' ) def testis_deleted(self): # Tests the", "expected, '_get_modified was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", ") # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted", "def testmodified(self): # Tests the modified property of the BaseDataObject class # -", "from uuid import UUID, uuid4 ####################################### # Third-party imports needed # ####################################### #######################################", "Test all \"bad\" values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects", "but it ' 'was allowed to be set' % (is_active, type(is_active).__name__) ) except", "if type(modified) == datetime: expected = modified elif type(modified) in (int, float): expected", "# Tests the oid property of the BaseDataObject class # - Assert that", "Set things up to force a call to _update: test_object._is_new = False for", "be set' % (is_dirty, type(is_dirty).__name__) ) except (TypeError, ValueError): pass except Exception as", "of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() 
self.assertEquals(", "'\"%s\" (%s) as valid is_active values, but it ' 'was allowed to be", "for the module at hms_core.data_objects. \"\"\" ####################################### # Any needed from __future__ imports", "allowed to ' 'be set' % (created, type(created).__name__) ) except (TypeError, ValueError): pass", "%s was raised instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error )", "Tests the _set_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "type(actual).__name__ ) ) def test_get_is_new(self): # Tests the _get_is_new method of the BaseDataObject", "instead:\\n' ' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self):", "\"bad\" values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not", "is correct: # self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to", "Initialization needed before member # # definition can take place # ####################################### #######################################", "'1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [ # - invalid types (1,2),", "= 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in the", "# '_del_property_name method as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) )", "from datetime import datetime from uuid import UUID, uuid4 ####################################### # Third-party imports", "# - strings '2001-01-01 12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', # - datetimes outside", "= test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to return \"%s\" (%s), but '", ") 
####################################### # Child-module test-cases to execute # ####################################### # import child_module #", "oid in GoodOIDs: if type(oid) == UUID: expected = oid elif type(oid) ==", "'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in the '", "' '\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( created,", "of the BaseDataObject class # - All we need to do here is", "'\"%s\" (%s) as a valid oid, but it was ' 'allowed to be", "type(is_deleted).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "'_del_property_name method as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) #######################################", "(%s) as valid is_active values, but it ' 'was allowed to be set'", "expected to be publicly deletable, # # the second item here (BaseDataObject._del_property_name) should", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected", "'was allowed to be set' % (is_new, type(is_new).__name__) ) except (TypeError, ValueError): pass", "should return a ' 'datetime value if it\\'s retrieved from an instance '", "was raised instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) )", "but %s was raised instead:\\n' ' %s' % ( modified, type(modified).__name__, error.__class__.__name__, error", "# Tests the _get_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\" (%s) should return", "type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the _del_modified method of the BaseDataObject class", "expected, 'Setting is_new to 
\"%s\" (%s) should return ' '\"%s\" (%s) through the", "= unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time() - testStartTime PrintTestResults(results) if", "self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): #", "= BaseDataObjectDerived() # - Test all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty)", "' '_get_oid method as its getter-method' ) # - Assert that the setter", "it ' 'was allowed to be set' % (is_new, type(is_new).__name__) ) except (TypeError,", "is_deleted property of the BaseDataObject class # - Assert that the getter is", "# # accordingly: # # - Assert that the setter is correct: #", "Assert that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to", "all \"bad\" values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should", "error ) ) def test_set_is_active(self): # Tests the _set_is_active method of the BaseDataObject", "####################################### # Module-level Constants # ####################################### LocalSuite = unittest.TestSuite() ####################################### # Import the", ") def test_del_modified(self): # Tests the _del_modified method of the BaseDataObject class test_object", "_create should return a known ' 'error-message, but the message returned ' 'was", "UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 
'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs", "self.assertEquals(actual, expected, '_get_modified was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids):", "self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests the _del_created method", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active, 'BaseDataObject.is_active", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to", "but \"%s\" (%s) was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self):", "(%s) ' 'was returned instead' % ( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__,", "%s was raised instead:\\n' ' %s' % ( created, type(created).__name__, error.__class__.__name__, error )", "# Tests the _set_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() #", ") # - Set things up to force a call to _update: test_object._is_new", "the ' '_get_is_dirty method as its getter-method' ) # - Assert that the", "value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should leave None in the ' 'underlying", "implemented ' '_create, as required by BaseDataObject' ): self.fail( 'Calling _create should return", "idic.unit_testing import * ####################################### # Initialization needed before member # # definition can", "modified elif type(modified) in (int, float): expected = datetime.fromtimestamp(modified) elif 
type(modified) == str:", "# '_get_property_name method as its getter-method' # ) # # - If property_name", "needed # ####################################### ####################################### # Local imports needed # ####################################### from idic.unit_testing import", "): self.fail( 'Calling _create should return a known ' 'error-message, but the message", "test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in", "' 'found instead' % ( test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self): # Tests", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._oid =", "but it was allowed to ' 'be set' % (created, type(created).__name__) ) except", "'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests the", "the _del_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty = 'unexpected", "it\\'s retrieved from an instance with an ' 'underlying None value' ) def", "modified in GoodDateTimes: if type(modified) == datetime: expected = modified elif type(modified) in", "' 'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) )", "that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use", "' 'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._oid,", "objects should not accept ' '\"%s\" (%s) as valid is_new values, but it", ") ) def test_del_is_new(self): # Tests the _del_is_new method of the BaseDataObject class", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted = expected actual", "self.fail( 
'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_new", "_del_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value'", "*oids): pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids, **criteria): pass", "self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID value ' 'if it\\'s retrieved from", "' 'allowed to be set' % (oid, type(oid).__name__) ) except (TypeError, ValueError): pass", "pass @classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### #", "BaseDataObjectDerived() expected = 'expected value' test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual, expected,", "use the ' '_set_modified method as its setter-method' ) # - Assert that", "# - deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False)", "Tests the is_new property of the BaseDataObject class # - Assert that the", "test_get_is_dirty(self): # Tests the _get_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived()", "property, but \"%s\" (%s) ' 'was returned instead' % ( is_active, type(is_active).__name__, expected,", "True is_new = False test_object = BaseDataObjectDerived( oid, created, modified, is_active, is_deleted, is_dirty,", "- strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ #", "' 'or ValueError if passed a modified value of ' '\"%s\" (%s), but", "objects 
should raise TypeError ' 'or ValueError if passed an is_active value '", "ValueError if passed an is_deleted value ' 'of \"%s\" (%s), but %s was", "# ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None", "12:34:56', '1911-01-01 12:34:56', # - datetimes outside the UNIX epoch, just in case", "( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests the _get_is_dirty", "getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the '", "objects should not accept ' '\"%s\" (%s) as valid is_dirty values, but it", "was raised instead:\\n' ' %s' % ( created, type(created).__name__, error.__class__.__name__, error ) )", "test_object._oid, None, 'BaseDataObject._del_oid should leave None in the ' 'underlying storage attribute, but", "] BadOIDs = [ # - invalid types (1,2), tuple(), True, False, object(),", "' 'was returned instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "is_new ) def _create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self, **criteria):", "from an instance with an ' 'underlying None value' ) def test_set_created(self): #", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._oid = expected actual", "_get_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "all \"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active", 
"@testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods # ################################### def", "BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected, 'Setting created to \"%s\"", "' %s' % ( oid, type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self): #", "True for dirty in (True, False, None): test_object._is_dirty = dirty try: test_object.save() except", "= test_object.modified self.assertEqual( actual, expected, 'Setting modified to \"%s\" (%s) should return '", "# BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use the ' #", "# Tests the is_dirty property of the BaseDataObject class # - Assert that", "what was expected' ) except Exception as error: self.fail( 'BaseDataObject.save did not raise", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel,", "self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None in the ' 'underlying storage attribute,", "of ' '\"%s\" (%s), but %s was raised instead:\\n' ' %s' % (", "deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the '", "Tests the _del_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new =", "the ' '_get_is_new method as its getter-method' ) # - Assert that the", "'_get_is_dirty method as its getter-method' ) # - Assert that the setter is", "if file is called # # or run directly. 
# ####################################### if __name__", "Tests the property_name property of the BaseDataObject class # # - Assert that", "'BaseDataObject.property_name is expected to use the ' # '_get_property_name method as its getter-method'", "setter is correct: # self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected", "'BaseDataObject._del_is_deleted should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "valid oid, but it was ' 'allowed to be set' % (oid, type(oid).__name__)", "is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_set_is_dirty", "is expected to use the ' '_set_is_dirty method as its setter-method' ) #", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is", "but \"%s\" (%s) was ' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) )", "= UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, expected, 'Setting oid to \"%s\"", "% ( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_set_is_dirty method as its setter-method'", "' 'or ValueError if passed an is_deleted value ' 'of \"%s\" (%s), but", "use the ' '_get_is_active method as its getter-method' ) # - Assert that", "is_new values, but it ' 'was allowed to be set' % (is_new, type(is_new).__name__)", "- If property_name is not expected to be publicly deletable, # # the", "= [ True, False, 1, 0 ] BadBooleanOrIntEquivalents = [ 'true', '', (1,2),", "self.assertEqual( actual, expected, 'Setting oid to \"%s\" (%s) should return ' '\"%s\" (%s)", "####################################### 
####################################### # Module-level Constants # ####################################### LocalSuite = unittest.TestSuite() ####################################### # Import", "correct: self.assertEqual( BaseDataObject.is_deleted.fdel, BaseDataObject._del_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_del_is_deleted method", "if passed an is_deleted value ' 'of \"%s\" (%s), but %s was raised", "oid elif type(oid) == str: expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual(", "but %s was raised instead:\\n' ' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error", "__author__ = '<NAME>' __copyright__ = 'Copyright 2018, all rights reserved' __status__ = 'Development'", "'Setting created to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "# ####################################### from idic.unit_testing import * ####################################### # Initialization needed before member #", "try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as created", "_get_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "def testis_deleted(self): # Tests the is_deleted property of the BaseDataObject class # -", "use the ' # '_set_property_name method as its setter-method' # ) # #", "correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use", "\"good\" values for modified in GoodDateTimes: if type(modified) == datetime: expected = modified", "'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in the '", "should leave None in the ' 'underlying storage attribute, but \"%s\" (%s) was", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_new 
in", "is_dirty values, but it ' 'was allowed to be set' % (is_dirty, type(is_dirty).__name__)", "test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True)", "= GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False is_deleted = True is_dirty =", "self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the ' '_del_modified method as", "'Setting is_deleted to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new else False actual = test_object.is_new self.assertEqual(", "to use the ' '_set_is_deleted method as its setter-method' ) # - Assert", "####################################### ####################################### # Local imports needed # ####################################### from idic.unit_testing import * #######################################", ") def test_del_is_new(self): # Tests the _del_is_new method of the BaseDataObject class test_object", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is", "'or ValueError if passed an is_deleted value ' 'of \"%s\" (%s), but %s", "( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests the _del_is_dirty method of", "dirty try: test_object.save() except NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived has", "# - Assert that the getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget, #", "instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) ) def test_set_is_dirty(self):", "'(%s) was returned instead.' 
% ( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "it was allowed to ' 'be set' % (created, type(created).__name__) ) except (TypeError,", "!= ( 'BaseDataObjectDerived has not implemented ' '_update, as required by BaseDataObject' ):", "self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the ' '_del_oid method as", "self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed an is_deleted", "= 'Copyright 2018, all rights reserved' __status__ = 'Development' ####################################### # Standard library", "class test_object = BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created", "Tests the is_dirty property of the BaseDataObject class # - Assert that the", "__status__ = 'Development' ####################################### # Standard library imports needed # ####################################### import os", "should raise TypeError ' 'or ValueError if passed an is_new value ' 'of", "####################################### # Standard library imports needed # ####################################### import os import sys import", "is_deleted, is_dirty, is_new ) def _create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._modified", "Set things up to force a call to _create: test_object._is_new = True for", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_dirty in", "to use the ' '_set_is_active method as its setter-method' ) # - Assert", "0 ] BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple() ] GoodDateTimes = [", ") ####################################### # Test-cases in the module # 
####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self,", "is_dirty, is_new ) self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty)", "expected to use the ' '_get_oid method as its getter-method' ) # -", "was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests the", "getter is correct: self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the '", "its setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_deleted.fdel,", "actual, type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a", "Tests of class methods # ################################### def test__init__(self): # Tests the __init__ method", "' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) ) def test_set_is_dirty(self): #", "not implemented ' '_create, as required by BaseDataObject' ): self.fail( 'Calling _create should", "created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False is_deleted = True is_dirty", "# Tests the _del_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid", "'_get_oid was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to", "to be publicly deletable, # # the second item here (BaseDataObject._del_property_name) should #", "' '\"%s\" (%s) through the property, but \"%s\" (%s) ' 'was returned instead'", "not accept \"%s\" ' '(%s) as modified values, 
but it was allowed to", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for created in", "use the ' # '_del_property_name method as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase(", "'expected value' test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected", "methods # ################################### def test__init__(self): # Tests the __init__ method of the BaseDataObject", "' 'was returned instead' % ( modified, type(modified).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "the ' 'expected error while being tested' ) ################################### # Tests of class", "values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # - timestamp numbers 1234567890, 1234567890.123456, # - strings", "'\"%s\" (%s) as valid is_deleted values, but it ' 'was allowed to be", "for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted else False actual", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted,", "' '_get_is_deleted method as its getter-method' ) # - Assert that the setter", "is correct: self.assertEqual( BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the ' '_del_modified", "%s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) ) def test_set_is_dirty(self): # Tests", "returned instead' % ( is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", ") ) # - Test all \"bad\" values for created in BadDateTimes: try:", "( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) ) def test_set_is_dirty(self): # Tests the _set_is_dirty", "deleter-method' ) # def testproperty_name(self): # # 
Tests the property_name property of the", "up to force a call to _create: test_object._is_new = True for dirty in", "= 'expected value' test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was", "method as its getter-method' # ) # # - If property_name is not", "for modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept \"%s\"", "# Initialization that needs to # # happen after member definition. # #######################################", "= expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to return \"%s\"", "expected to use the ' # '_set_property_name method as its setter-method' # )", "use the ' '_del_is_new method as its deleter-method' ) def testmodified(self): # Tests", "its deleter-method' ) def testis_new(self): # Tests the is_new property of the BaseDataObject", "values for created in GoodDateTimes: if type(created) == datetime: expected = created elif", "'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted, False, 'BaseDataObject._del_is_deleted should leave None in the '", "_set_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "GoodDateTimes: if type(created) == datetime: expected = created elif type(created) in (int, float):", "being tested' ) # - Set things up to force a call to", "member definition. 
# ####################################### ####################################### # Code to execute if file is called", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty,", "correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_del_is_dirty method", "'was returned instead' % ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) )", "expected to use the ' '_del_created method as its deleter-method' ) def testis_active(self):", "'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [ # - invalid types (1,2), tuple(),", "'returned \"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid", "Exception as error: self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if", "Test all \"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if", "####################################### # Test-cases in the module # ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None,", "Tests the _set_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "setter- and deleter-method calls are operating as # expected. # - deleters first", "test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected to return", "NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived has not implemented ' '_create,", "that needs to # # happen after member definition. 
# ####################################### ####################################### #", "datetime.strptime( '2001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56',", "self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_deleted values,", "in (int, float): expected = datetime.fromtimestamp(modified) elif type(modified) == str: expected = datetime.strptime(", "getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified,", "was expected' ) except Exception as error: self.fail( 'BaseDataObject.save did not raise the", "expected, '_get_is_dirty was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", "use the ' '_del_modified method as its deleter-method' ) def testoid(self): # Tests", "self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters oid = uuid4() created = GoodDateTimes[0]", "the _del_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected", "to use the ' '_del_oid method as its deleter-method' ) # def testproperty_name(self):", "values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not accept", "(%s) was ' 'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self):", "objects should not accept \"%s\" ' '(%s) as created values, but it was", ") # - Test all \"bad\" values for created in BadDateTimes: try: test_object._set_created(created)", "= expected actual = test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected to return \"%s\"", "save method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Set things", "allowed to be set' % (is_new, 
type(is_new).__name__) ) except (TypeError, ValueError): pass except", "is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to", "'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs = [ # - actual UUID values", "test_object = BaseDataObjectDerived() test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should", "module being tested # ####################################### import hms_core.data_objects as data_objects from hms_core.data_objects import *", ") # - Test all \"bad\" values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active)", "execute if file is called # # or run directly. # ####################################### if", "in GoodDateTimes: if type(created) == datetime: expected = created elif type(created) in (int,", "as valid is_new values, but it ' 'was allowed to be set' %", "expected = 'expected value' test_object._modified = expected actual = test_object.modified self.assertEquals(actual, expected, '_get_modified", "as its deleter-method' ) def testis_deleted(self): # Tests the is_deleted property of the", "' 'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests the _get_created", "the module being tested # ####################################### import hms_core.data_objects as data_objects from hms_core.data_objects import", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted = 'unexpected value' test_object._del_is_deleted() self.assertEquals( test_object._is_deleted,", "####################################### # Module metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__ = 'Copyright 2018,", "correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 
'BaseDataObject.created is expected to use the ' '_del_created method", "storage attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) )", "False actual = test_object.is_dirty self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\" (%s) should", "the ' # '_set_property_name method as its setter-method' # ) # # -", "type(actual).__name__ ) ) test_object._created = None self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a '", "allowed to be set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass except", "test_object._modified = 'unexpected value' test_object._del_modified() self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None in", "raise TypeError ' 'or ValueError if passed an is_new value ' 'of \"%s\"", "'BaseDataObject.modified is expected to use the ' '_del_modified method as its deleter-method' )", "the property_name property of the BaseDataObject class # # - Assert that the", "and the failure message adjusted # # accordingly: # # - Assert that", "####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular #", "% (oid, type(oid).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail(", "tested # ####################################### import hms_core.data_objects as data_objects from hms_core.data_objects import * ####################################### #", "BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created, None, 'BaseDataObject._del_created should leave None", "instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests the _get_created method of", "self.fail( 'BaseDataObject objects should not 
accept ' '\"%s\" (%s) as valid is_new values,", "self.assertEquals( test_object._modified, None, 'BaseDataObject._del_modified should leave None in the ' 'underlying storage attribute,", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_dirty", "accept \"%s\" ' '(%s) as created values, but it was allowed to '", "the ' '_get_modified method as its getter-method' ) # - Assert that the", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_active.fget, BaseDataObject._get_is_active,", "(%s) ' 'was returned instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__,", "self.assertEqual(type(test_object._get_created()), datetime, 'BaseDataObject._get_created should return a ' 'datetime value if it\\'s retrieved from", "unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime = time.time() - testStartTime PrintTestResults(results) if not", "'BaseDataObject._del_is_active should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "is_deleted values, but it ' 'was allowed to be set' % (is_deleted, type(is_deleted).__name__)", "set' % (oid, type(oid).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use", "the ' '_set_created method as its setter-method' ) # - Assert that the", "the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to use the", "type(modified).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "(modified, type(modified).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", "= expected actual = 
test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted was expected to return \"%s\"", "not accept ' '\"%s\" (%s) as valid is_deleted values, but it ' 'was", "actual = test_object.is_active self.assertEqual( actual, expected, 'Setting is_active to \"%s\" (%s) should return", "raise TypeError ' 'or ValueError if passed a created value of ' '\"%s\"", "was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the", "property, but \"%s\" (%s) ' 'was returned instead' % ( is_new, type(is_new).__name__, expected,", "setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the '", "= None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a ' 'datetime value if it\\'s", "- Assert that the getter is correct: self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected", "\"%s\" (%s) was ' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def", "is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True if is_new else False actual =", "instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests", "is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the ' '_del_oid", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is", "BaseDataObjectDerived() # - Test all \"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected", "' '_del_modified method as its deleter-method' ) def testoid(self): # Tests the oid", "numbers 1234567890, 1234567890.123456, # - strings '2001-01-01 
12:34:56', '3001-01-01 12:34:56', '1911-01-01 12:34:56', #", "True is_dirty = True is_new = False test_object = BaseDataObjectDerived( oid, created, modified,", "' 'found instead' % ( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests", "test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None in the ' 'underlying storage", "'expected value' test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected", "getter-method' # ) # # - If property_name is not expected to be", "test_get_is_active(self): # Tests the _get_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived()", "BaseDataObjectDerived() # - Test all \"good\" values for modified in GoodDateTimes: if type(modified)", "# Child-module test-cases to execute # ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) #######################################", ") def test_set_created(self): # Tests the _set_created method of the BaseDataObject class test_object", "os import sys import unittest from datetime import datetime from uuid import UUID,", "expected, '_get_is_new was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s)", "test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should", "passed an is_active value ' 'of \"%s\" (%s), but %s was raised instead:\\n'", "# # the second item here (BaseDataObject._set_property_name) should # # be changed to", "# # - Assert that the getter is correct: # self.assertEqual( # BaseDataObject.property_name.fget,", "None, and the failure message adjusted # # accordingly: # # - Assert", "test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, 
expected, 'Setting oid to \"%s\" (%s) should", "'_get_is_new was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "property_name is not expected to be publicly settable, # # the second item", "= [ 'true', '', (1,2), tuple() ] GoodDateTimes = [ # - actual", "as valid is_active values, but it ' 'was allowed to be set' %", "BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_set_is_dirty method as its", "the is_dirty property of the BaseDataObject class # - Assert that the getter", "need to do here is prove that the various # setter- and deleter-method", "BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the ' '_set_modified method as its setter-method'", "- invalid values 'true', '', '1911-01-01 12:34:56.123456' ] GoodOIDs = [ # -", "UUID: expected = oid elif type(oid) == str: expected = UUID(oid) test_object._set_oid(oid) actual", "# - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is", "instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self): # Tests", "\"good\" values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty else", "through the property, but \"%s\" (%s) ' 'was returned instead' % ( is_new,", "type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests the _del_is_active method of the BaseDataObject", "returned instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", "in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept ' '\"%s\" (%s)", "'BaseDatObject objects should not accept ' '\"%s\" (%s) as a valid oid, but", "= 'expected value' test_object._modified = expected actual = test_object.modified self.assertEquals(actual, 
expected, '_get_modified was", "- If property_name is not expected to be publicly settable, # # the", "= test_object.created self.assertEqual( actual, expected, 'Setting created to \"%s\" (%s) should return '", "but \"%s\" (%s) was ' 'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) )", "% ( is_new, type(is_new).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "' '\"%s\" (%s) as valid is_deleted values, but it ' 'was allowed to", "\"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not", "str: expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid self.assertEqual( actual, expected, 'Setting oid", "Module metadata/dunder-names # ####################################### __author__ = '<NAME>' __copyright__ = 'Copyright 2018, all rights", "was raised instead:\\n' ' %s' % ( is_new, type(is_new).__name__, error.__class__.__name__, error ) )", "BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the ' '_get_is_new method as its", "Tests the _get_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "modified property of the BaseDataObject class # - Assert that the getter is", "is expected to use the ' '_get_is_new method as its getter-method' ) #", "BaseDataObject._set_created, 'BaseDataObject.created is expected to use the ' '_set_created method as its setter-method'", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty", "# - Test all \"bad\" values for created in BadDateTimes: try: test_object._set_created(created) self.fail(", "BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [ # -", "False is_deleted = True is_dirty = True is_new = False 
test_object = BaseDataObjectDerived(", "the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to use the", "should return a UUID value ' 'if it\\'s retrieved from an instance with", "test_set_is_new(self): # Tests the _set_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived()", "method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new()", "self.fail( 'BaseDataObject objects should raise TypeError ' 'or ValueError if passed a modified", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests the", "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._created = None", "@classmethod def get(cls, *oids, **criteria): pass @classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting", "method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Set things up", "test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in the ' 'underlying storage", "the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the", "uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), UUID('00000000-0000-0000-0000-000000000000'), # - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff',", "'\"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( created, type(created).__name__,", "settable, # # the second item here (BaseDataObject._set_property_name) should # # be changed", "test_set_oid(self): # 
Tests the _set_oid method of the BaseDataObject class test_object = BaseDataObjectDerived()", "'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests the _get_created method", "deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use the '", "# - invalid types (1,2), tuple(), True, False, object(), # - invalid values", "ValueError if passed an is_new value ' 'of \"%s\" (%s), but %s was", "publicly settable, # # the second item here (BaseDataObject._set_property_name) should # # be", "'was allowed to be set' % (is_active, type(is_active).__name__) ) except (TypeError, ValueError): pass", "TypeError ' 'or ValueError if passed an is_new value ' 'of \"%s\" (%s),", "def _update(self): return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return", "class test_object = BaseDataObjectDerived() # - Set things up to force a call", "'1911-01-01 12:34:56', # - datetimes outside the UNIX epoch, just in case datetime.strptime(", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests the _get_is_deleted method", "oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created, modified,", "] GoodOIDs = [ # - actual UUID values uuid4(), str(uuid4()), UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),", "call to _update: test_object._is_new = False for dirty in (True, False, None): test_object._is_dirty", "is expected to use the ' '_set_modified method as its setter-method' ) #", "' '\"%s\" (%s) as valid is_new values, but it ' 'was allowed to", ") test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a ' 
'datetime value", "pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod def get(cls, *oids, **criteria): pass @classmethod", "= test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected to return \"%s\" (%s), but '", "expected, 'Setting is_dirty to \"%s\" (%s) should return ' '\"%s\" (%s) through the", "expected = 'expected value' test_object._created = expected actual = test_object.created self.assertEquals(actual, expected, '_get_created", "all \"bad\" values for created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should", "(True, False, None): test_object._is_dirty = dirty try: test_object.save() except NotImplementedError as error: if", "error: self.fail( 'BaseDataObject.save did not raise the ' 'expected error while being tested'", "Assert that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset, BaseDataObject._set_is_new, 'BaseDataObject.is_new is expected to", "to use the ' '_set_is_new method as its setter-method' ) # - Assert", "has not implemented ' '_create, as required by BaseDataObject' ): self.fail( 'Calling _create", "storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__", "True, False, object(), # - invalid values 'true', '', '1911-01-01 12:34:56.123456' ] #######################################", "the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to use the", "needed # ####################################### import os import sys import unittest from datetime import datetime", "actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was expected to return \"%s\" (%s), but", "BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to use the ' '_del_created method as its", "if is_active else False actual = test_object.is_active self.assertEqual( 
actual, expected, 'Setting is_active to", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected", "in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s)", "return ' '\"%s\" (%s) through the property, but \"%s\" ' '(%s) was returned", "that the deleter is correct: self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use", "but \"%s\" (%s) ' 'was returned instead' % ( is_new, type(is_new).__name__, expected, type(expected).__name__,", "- invalid values 'true', '', '1911-01-01 12:34:56.123456' ] ####################################### # Code-coverage test-case and", "use # ####################################### __all__ = [ # Test-case classes # Child test-modules ]", "various # setter- and deleter-method calls are operating as # expected. # -", "self.assertEquals(test_object.oid, oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified,", "be publicly settable, # # the second item here (BaseDataObject._set_property_name) should # #", "Code to execute if file is called # # or run directly. 
#", "value if it\\'s retrieved from an instance ' 'with an underlying None value'", "imports needed # ####################################### import os import sys import unittest from datetime import", "is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_get_is_deleted", "was raised instead:\\n' ' %s' % ( oid, type(oid).__name__, error.__class__.__name__, error ) )", "# import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular # #", "BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod def", "is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self): # Tests the _set_modified method", "GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if is_deleted else False actual = test_object.is_deleted self.assertEqual(", "test_get_oid(self): # Tests the _get_oid method of the BaseDataObject class test_object = BaseDataObjectDerived()", "\"%s\" (%s) was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): #", "( oid, type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self): # Tests the save", "Test all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True if", "expected to use the ' '_del_is_deleted method as its deleter-method' ) def testis_dirty(self):", "was ' 'found instead' % ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): #", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid", "use the ' '_set_is_dirty method as its setter-method' ) # - Assert that", 
"deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected", "being tested' ) ################################### # Tests of class properties # ################################### def testcreated(self):", "# - Assert that the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is", "dependencies. Avoid if possible. # ####################################### ####################################### # Initialization that needs to #", "Test all \"bad\" values for created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects", "_update(self): return BaseDataObject._update(self) def matches(self, **criteria): return BaseDataObject.matches(self, **criteria) def to_data_dict(self): return BaseDataObject.to_data_dict(self)", "'_get_is_active was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "float): expected = datetime.fromtimestamp(created) elif type(created) == str: expected = datetime.strptime( created, BaseDataObject._data_time_string", "things up to force a call to _create: test_object._is_new = True for dirty", "here (BaseDataObject._set_property_name) should # # be changed to None, and the failure message", "- invalid types (1,2), tuple(), True, False, object(), # - invalid values 'true',", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for modified in", "TypeError ' 'or ValueError if passed an is_dirty value ' 'of \"%s\" (%s),", "# ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports to resolve circular", "self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use the ' '_set_created method as", "implemented ' '_update, as required by BaseDataObject' ): self.fail( 
'Calling _create should return", "is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept ' '\"%s\"", "%s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def test_set_is_deleted(self): # Tests", "returned instead.' % ( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", "test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty = expected actual = test_object.is_dirty", "'as an oid, but %s was raised instead:\\n' ' %s' % ( oid,", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected", "be set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass except Exception as", "BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._modified = expected actual", "instead' % ( test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self): # Tests the _del_oid", "actual = test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to \"%s\" (%s) should return", "is_active to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._oid", "= test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty was expected to return \"%s\" (%s), but '", "\"bad\" values for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not", "method as its deleter-method' # ) LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testBaseDataObject ) ) ####################################### #", "- Test all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents: 
test_object._set_is_dirty(is_dirty) expected = True", "expected = 'expected value' test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual, expected, '_get_is_active", "as its deleter-method' ) # def testproperty_name(self): # # Tests the property_name property", "an underlying None value' ) def test_get_oid(self): # Tests the _get_oid method of", "type(actual).__name__ ) ) def test_get_modified(self): # Tests the _get_modified method of the BaseDataObject", "correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to use the ' '_del_is_active method", "to execute if file is called # # or run directly. # #######################################", "raised instead:\\n' ' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error ) ) def", "called # # or run directly. # ####################################### if __name__ == '__main__': import", "'expected value' test_object._created = expected actual = test_object.created self.assertEquals(actual, expected, '_get_created was expected", "BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid,", "from an instance ' 'with an underlying None value' ) def test_get_is_active(self): #", "(created, type(created).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", "accept ' '\"%s\" (%s) as valid is_active values, but it ' 'was allowed", "BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_get_property_name method as", "'or ValueError if passed a value of \"%s\" (%s) ' 'as an oid,", "' '_set_is_dirty method as its setter-method' ) # - Assert that the deleter", ") # - Test all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty)", "BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted is 
expected to use the ' '_get_is_deleted method as its getter-method'", "= True is_new = False test_object = BaseDataObjectDerived( oid, created, modified, is_active, is_deleted,", "deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False)", "GoodDateTimes[1] is_active = False is_deleted = True is_dirty = True is_new = False", ") def test_del_is_active(self): # Tests the _del_is_active method of the BaseDataObject class test_object", "( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests the _del_is_deleted method of", "Initialization that needs to # # happen after member definition. # ####################################### #######################################", "# ####################################### import os import sys import unittest from datetime import datetime from", "expected to use the ' '_get_created method as its getter-method' ) # -", "data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases in the module #", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected", "'_get_property_name method as its getter-method' # ) # # - If property_name is", "test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_active", "# self.assertEqual( # BaseDataObject.property_name.fget, # BaseDataObject._get_property_name, # 'BaseDataObject.property_name is expected to use the", "####################################### # Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True, False, 1,", "returned instead' % ( 
created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) #", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_new(self): # Tests the", "'be set' % (modified, type(modified).__name__) ) except (TypeError, ValueError): pass except Exception as", "error.__class__.__name__, error ) ) def test_set_modified(self): # Tests the _set_modified method of the", "self.fail( 'Calling _create should return a known ' 'error-message, but the message returned", "but \"%s\" ' '(%s) was returned instead.' % ( oid, type(oid).__name__, expected, type(expected).__name__,", "the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to use the", "that the deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, # 'BaseDataObject.property_name", "None, 'BaseDataObject._del_created should leave None in the ' 'underlying storage attribute, but \"%s\"", "expected = modified elif type(modified) in (int, float): expected = datetime.fromtimestamp(modified) elif type(modified)", "elif type(created) == str: expected = datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual =", "UUID, uuid4 ####################################### # Third-party imports needed # ####################################### ####################################### # Local imports", "'allowed to be set' % (oid, type(oid).__name__) ) except (TypeError, ValueError): pass except", "- Test all \"good\" values for created in GoodDateTimes: if type(created) == datetime:", "not expected to be publicly settable, # # the second item here (BaseDataObject._set_property_name)", "the _set_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test", "in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active else 
False actual = test_object.is_active", "Tests the _set_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() # -", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.modified.fget,", "####################################### # Code to execute if file is called # # or run", "def get(cls, *oids, **criteria): pass @classmethod def sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting", "method as its deleter-method' ) def testis_deleted(self): # Tests the is_deleted property of", "'BaseDataObject objects should not accept \"%s\" ' '(%s) as modified values, but it", "attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def", "def test_del_created(self): # Tests the _del_created method of the BaseDataObject class test_object =", "str(error) != ( 'BaseDataObjectDerived has not implemented ' '_update, as required by BaseDataObject'", "the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to use the", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset,", "= '<NAME>' __copyright__ = 'Copyright 2018, all rights reserved' __status__ = 'Development' #######################################", "deleter-method' ) def testoid(self): # Tests the oid property of the BaseDataObject class", "%s was raised instead:\\n' ' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__, error )", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created() self.assertEquals( test_object._created,", "underlying None value' ) def test_get_is_active(self): # Tests the _get_is_active method of the", "Create an \"__all__\" list to support # # \"from module import member\" 
use", "as its deleter-method' ) def testoid(self): # Tests the oid property of the", "(%s), but %s was raised instead:\\n' ' %s' % ( created, type(created).__name__, error.__class__.__name__,", "# Tests the _set_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() #", "False actual = test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to \"%s\" (%s) should", "the ' '_del_modified method as its deleter-method' ) def testoid(self): # Tests the", "'_set_is_dirty method as its setter-method' ) # - Assert that the deleter is", "testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods # ################################### def test__init__(self): # Tests", "BaseDataObjectDerived() # - Test all \"good\" values for oid in GoodOIDs: if type(oid)", "its deleter-method' ) def testis_dirty(self): # Tests the is_dirty property of the BaseDataObject", "( oid, type(oid).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all", "correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the ' '_set_modified method", "_del_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value'", "Assert that the deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, # BaseDataObject._del_property_name, #", "= oid elif type(oid) == str: expected = UUID(oid) test_object._set_oid(oid) actual = test_object.oid", "_set_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all", "_del_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value'", "################################### # Tests of class properties # ################################### def testcreated(self): # Tests the", "the deleter is correct: self.assertEqual( 
BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the", "modified, is_active, is_deleted, is_dirty, is_new ) def _create(self): return BaseDataObject._create(self) def _update(self): return", "UUID, 'BaseDataObject._get_oid should return a UUID value ' 'if it\\'s retrieved from an", "(%s) was ' 'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self):", "GoodDateTimes = [ # - actual datetime values datetime.now(), datetime.fromtimestamp(1234567890), datetime.now().timestamp(), # -", "' 'be set' % (created, type(created).__name__) ) except (TypeError, ValueError): pass except Exception", "test_del_is_dirty(self): # Tests the _del_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived()", "= 'hms_core' _testModule = data_objects LocalSuite.addTests( unittest.TestLoader().loadTestsFromTestCase( testdata_objectsCodeCoverage ) ) ####################################### # Test-cases", "it was allowed to ' 'be set' % (modified, type(modified).__name__) ) except (TypeError,", "= BaseDataObjectDerived() # - Set things up to force a call to _create:", "self.assertEquals(actual, expected, '_get_is_new was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test all \"bad\"", "to _update: test_object._is_new = False for dirty in (True, False, None): test_object._is_dirty =", "# Tests of class properties # ################################### def testcreated(self): # Tests the created", "is_dirty=None, is_new=None ): BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted, is_dirty, is_new )", "it was ' 'allowed to be set' % (oid, type(oid).__name__) ) except (TypeError,", "'Setting oid to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "type(is_active).__name__, expected, type(expected).__name__, 
actual, type(actual).__name__, ) ) # - Test all \"bad\" values", "BaseDataObject._del_property_name, # 'BaseDataObject.property_name is expected to use the ' # '_del_property_name method as", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.oid.fget,", "'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None in the '", "the deleter is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to use the", "test_del_oid(self): # Tests the _del_oid method of the BaseDataObject class test_object = BaseDataObjectDerived()", "type(created).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "all \"bad\" values for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should", "should not accept ' '\"%s\" (%s) as valid is_active values, but it '", "is expected to use the ' '_set_is_new method as its setter-method' ) #", "# Tests the _set_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() #", "in the ' 'underlying storage attribute, but \"%s\" (%s) was ' 'found instead'", "(%s) was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests", "for is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept '", "# 'BaseDataObject.property_name is expected to use the ' # '_del_property_name method as its", "expected = 'expected value' test_object._is_deleted = expected actual = test_object.is_deleted self.assertEquals(actual, expected, '_get_is_deleted", "% ( created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "is_deleted, type(is_deleted).__name__, 
error.__class__.__name__, error ) ) def test_set_is_dirty(self): # Tests the _set_is_dirty method", "ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects should raise TypeError '", "Tests the is_active property of the BaseDataObject class # - Assert that the", "actual = test_object.modified self.assertEquals(actual, expected, '_get_modified was expected to return \"%s\" (%s), but", "actual, type(actual).__name__, ) ) # - Test all \"bad\" values for is_active in", "(TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects should raise TypeError", "the BaseDataObject class test_object = BaseDataObjectDerived() # - Set things up to force", "test-cases to execute # ####################################### # import child_module # LocalSuite.addTests(child_module.LocalSuite._tests) ####################################### # Imports", "# self.assertEqual( # BaseDataObject.property_name.fset, # BaseDataObject._set_property_name, # 'BaseDataObject.property_name is expected to use the", "' 'datetime value if it\\'s retrieved from an instance ' 'with an underlying", "not what was expected' ) except Exception as error: self.fail( 'BaseDataObject.save did not", "' '\"%s\" (%s) as a valid oid, but it was ' 'allowed to", "method of the BaseDataObject class # - All we need to do here", "that the deleter is correct: self.assertEqual( BaseDataObject.is_dirty.fdel, BaseDataObject._del_is_dirty, 'BaseDataObject.is_dirty is expected to use", "the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_deleted = expected", "instead' % (test_object._is_new, type(test_object._is_new).__name__) ) def test_del_modified(self): # Tests the _del_modified method of", "setter-method' ) # - Assert that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid,", "elif type(modified) in (int, float): expected = datetime.fromtimestamp(modified) elif 
type(modified) == str: expected", "- Assert that the setter is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected", "its deleter-method' ) def testis_deleted(self): # Tests the is_deleted property of the BaseDataObject", ") ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime, 'BaseDataObject._get_modified should return a ' 'datetime", "\"bad\" values for created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not", "'BaseDataObject.modified is expected to use the ' '_get_modified method as its getter-method' )", "test_object._set_is_new(is_new) expected = True if is_new else False actual = test_object.is_new self.assertEqual( actual,", "attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._created, type(test_object._created).__name__ )", "Tests the _get_created method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "'BaseDataObject._del_is_dirty should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "if passed a value of \"%s\" (%s) ' 'as an oid, but %s", "things up to force a call to _update: test_object._is_new = False for dirty", "BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as", "'of \"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_dirty,", "(%s), but %s was raised instead:\\n' ' %s' % ( is_active, type(is_active).__name__, error.__class__.__name__,", "the ' '_get_is_deleted method as its getter-method' ) # - Assert that the", "is expected to use the ' # '_del_property_name method as its deleter-method' #", "as a valid oid, but it was ' 'allowed to be set' %", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._oid, type(test_object._oid).__name__)", "% ( test_object._modified, type(test_object._modified).__name__ ) ) def 
test_del_oid(self): # Tests the _del_oid method", "'of \"%s\" (%s), but %s was raised instead:\\n' ' %s' % ( is_new,", "( created, type(created).__name__, error.__class__.__name__, error ) ) def test_set_is_active(self): # Tests the _set_is_active", "'\"%s\" (%s) as valid is_new values, but it ' 'was allowed to be", "test_set_is_active(self): # Tests the _set_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived()", "BaseDataObjectDerived() self.assertEquals(test_object._created, None) self.assertEquals(test_object._is_active, True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None)", "test_object._is_new = True for dirty in (True, False, None): test_object._is_dirty = dirty try:", "Tests the _del_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_dirty =", "): BaseDataObject.__init__( self, oid, created, modified, is_active, is_deleted, is_dirty, is_new ) def _create(self):", "if passed an is_dirty value ' 'of \"%s\" (%s), but %s was raised", "BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual, expected, 'Setting modified to \"%s\"", "Assert that the getter is correct: self.assertEqual( BaseDataObject.oid.fget, BaseDataObject._get_oid, 'BaseDataObject.oid is expected to", "# ) # # - If property_name is not expected to be publicly", "raised instead:\\n' ' %s' % ( modified, type(modified).__name__, error.__class__.__name__, error ) ) def", "= 'expected value' test_object._created = expected actual = test_object.created self.assertEquals(actual, expected, '_get_created was", "class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget, BaseDataObject._get_is_deleted, 'BaseDataObject.is_deleted", "that the setter is correct: self.assertEqual( 
BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use", "is expected to use the ' '_get_created method as its getter-method' ) #", "was ' 'found instead' % ( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): #", "BaseDataObject' ): self.fail( 'Calling _create should return a known ' 'error-message, but the", "True, False, 1, 0 ] BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple() ]", "is_active else False actual = test_object.is_active self.assertEqual( actual, expected, 'Setting is_active to \"%s\"", "# Tests the _get_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "'BaseDataObject._get_oid should return a UUID value ' 'if it\\'s retrieved from an instance", "operating as # expected. # - deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created, None)", "True, 'BaseDataObject._del_is_new should leave None in the ' 'underlying storage attribute, but \"%s\"", "# # dependencies. Avoid if possible. 
# ####################################### ####################################### # Initialization that needs", "# ################################### def test__init__(self): # Tests the __init__ method of the BaseDataObject class", "BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the ' '_set_modified method as its", ") # - Test all \"bad\" values for oid in BadOIDs: try: test_object._set_oid(oid)", "error.__class__.__name__, error ) ) def test_set_is_active(self): # Tests the _set_is_active method of the", "force a call to _update: test_object._is_new = False for dirty in (True, False,", "__name__ == '__main__': import time results = unittest.TestResult() testStartTime = time.time() LocalSuite.run(results) results.runTime", "for created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept \"%s\"", "method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._created = 'unexpected value' test_object._del_created()", "= unittest.TestSuite() ####################################### # Import the module being tested # ####################################### import hms_core.data_objects", "value' test_object._modified = expected actual = test_object.modified self.assertEquals(actual, expected, '_get_modified was expected to", "# Module-level Constants # ####################################### LocalSuite = unittest.TestSuite() ####################################### # Import the module", "datetime from uuid import UUID, uuid4 ####################################### # Third-party imports needed # #######################################", "'1911-01-01 12:34:56.123456' ] GoodOIDs = [ # - actual UUID values uuid4(), str(uuid4()),", ") ) def test_get_is_dirty(self): # Tests the _get_is_dirty method of the BaseDataObject class", "can take place # ####################################### ####################################### # Module-level 
Constants # ####################################### LocalSuite =", "- Test all \"good\" values for is_deleted in GoodBooleanOrIntEquivalents: test_object._set_is_deleted(is_deleted) expected = True", "test_object.save() except NotImplementedError as error: if str(error) != ( 'BaseDataObjectDerived has not implemented", "use the ' '_get_is_dirty method as its getter-method' ) # - Assert that", "% ( test_object._is_active, type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests the _del_is_deleted method", "second item here (BaseDataObject._set_property_name) should # # be changed to None, and the", "= 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None, 'BaseDataObject._del_oid should leave None in the", "self.assertEqual( BaseDataObject.is_active.fset, BaseDataObject._set_is_active, 'BaseDataObject.is_active is expected to use the ' '_set_is_active method as", ") ) def test_get_is_new(self): # Tests the _get_is_new method of the BaseDataObject class", "modified in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept \"%s\" '", "################################### # Tests of class methods # ################################### def test__init__(self): # Tests the", "'_set_is_deleted method as its setter-method' ) # - Assert that the deleter is", ") def test_set_modified(self): # Tests the _set_modified method of the BaseDataObject class test_object", "' 'underlying None value' ) def test_set_created(self): # Tests the _set_created method of", "after member definition. 
# ####################################### ####################################### # Code to execute if file is", "\"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject objects should not", "uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False is_deleted = True", "to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but \"%s\"", "expected to use the ' '_set_is_dirty method as its setter-method' ) # -", "% ( test_object._is_dirty, type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests the _del_is_new method", "expected actual = test_object.oid self.assertEquals(actual, expected, '_get_oid was expected to return \"%s\" (%s),", "# ####################################### import hms_core.data_objects as data_objects from hms_core.data_objects import * ####################################### # Constants", "member\" use # ####################################### __all__ = [ # Test-case classes # Child test-modules", "type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self): # Tests the save method of", "# Tests the _set_created method of the BaseDataObject class test_object = BaseDataObjectDerived() #", "self.assertEqual( BaseDataObject.is_deleted.fset, BaseDataObject._set_is_deleted, 'BaseDataObject.is_deleted is expected to use the ' '_set_is_deleted method as", "to do here is prove that the various # setter- and deleter-method calls", "for oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept '", "as error: self.fail( 'BaseDataObject.save did not raise the ' 'expected error while being", "'\"%s\" (%s) as valid is_dirty values, but it ' 'was allowed to be", "# Tests the _get_is_dirty method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "here is prove that the various # setter- and deleter-method calls are operating", "classes # Child test-modules 
] ####################################### # Module metadata/dunder-names # ####################################### __author__ =", "failure message adjusted # # accordingly: # # - Assert that the setter", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_deleted.fset,", "type(test_object._is_active).__name__ ) ) def test_del_is_deleted(self): # Tests the _del_is_deleted method of the BaseDataObject", "%s was raised instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error )", "the BaseDataObject class # - Assert that the getter is correct: self.assertEqual( BaseDataObject.is_deleted.fget,", "from __future__ imports # # Create an \"__all__\" list to support # #", "from an instance ' 'with an underlying None value' ) def test_get_oid(self): #", "Test all \"good\" values for created in GoodDateTimes: if type(created) == datetime: expected", "# ####################################### __all__ = [ # Test-case classes # Child test-modules ] #######################################", "= 'expected value' test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual, expected, '_get_is_new was", "'Setting is_dirty to \"%s\" (%s) should return ' '\"%s\" (%s) through the property,", "- Test all \"bad\" values for is_dirty in BadBooleanOrIntEquivalents: try: test_object._set_is_dirty(is_dirty) self.fail( 'BaseDataObject", "_get_oid method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value'", "method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_active", "self.assertEquals(actual, expected, '_get_is_deleted was expected to return \"%s\" (%s), but ' 'returned \"%s\"", "of the BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values", ") # - Assert that the setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 
'BaseDataObject.oid", "through the property, but \"%s\" (%s) ' 'was returned instead' % ( is_active,", "# def testproperty_name(self): # # Tests the property_name property of the BaseDataObject class", "actual = test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected to return \"%s\" (%s), but", "= BaseDataObjectDerived() expected = 'expected value' test_object._is_new = expected actual = test_object.is_new self.assertEquals(actual,", "actual, type(actual).__name__ ) ) def test_get_is_deleted(self): # Tests the _get_is_deleted method of the", "type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self): # Tests the _get_modified method of", "def test_set_is_dirty(self): # Tests the _set_is_dirty method of the BaseDataObject class test_object =", "# - Assert that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is", "test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid should return a UUID value ' 'if", "self.assertEqual( actual, expected, 'Setting modified to \"%s\" (%s) should return ' '\"%s\" (%s)", "test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept ' '\"%s\" (%s) as a valid", "# - strings 'dc3a7fdf-2183-49cc-aa00-af9239950254', 'ffffffff-ffff-ffff-ffff-ffffffffffff', '00000000-0000-0000-0000-000000000000', 'dc3a7fdf218349ccaa00af9239950254', 'ffffffffffffffffffffffffffffffff', '00000000000000000000000000000000', ] BadOIDs = [", "type(is_active).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject objects", "def testis_dirty(self): # Tests the is_dirty property of the BaseDataObject class # -", "'BaseDataObject objects should not accept ' '\"%s\" (%s) as valid is_new values, but", "test_object.created self.assertEqual( actual, expected, 'Setting created to \"%s\" (%s) should return ' '\"%s\"", "test_object._is_new = 
False for dirty in (True, False, None): test_object._is_dirty = dirty try:", "datetime.fromtimestamp(modified) elif type(modified) == str: expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual", "# ####################################### if __name__ == '__main__': import time results = unittest.TestResult() testStartTime =", "Test-cases in the module # ####################################### class BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None,", "# accordingly: # # - Assert that the setter is correct: # self.assertEqual(", "####################################### # Initialization needed before member # # definition can take place #", "test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_dirty in GoodBooleanOrIntEquivalents:", "__all__ = [ # Test-case classes # Child test-modules ] ####################################### # Module", "= 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None in the", "import datetime from uuid import UUID, uuid4 ####################################### # Third-party imports needed #", "(%s) as valid is_dirty values, but it ' 'was allowed to be set'", "% (test_object._oid, type(test_object._oid).__name__) ) def test_get_created(self): # Tests the _get_created method of the", "( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()), UUID, 'BaseDataObject._get_oid", "needs to # # happen after member definition. 
# ####################################### ####################################### # Code", "for test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True, False, 1, 0 ] BadBooleanOrIntEquivalents", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.is_active.fdel, BaseDataObject._del_is_active, 'BaseDataObject.is_active is expected to", "= test_object.created self.assertEquals(actual, expected, '_get_created was expected to return \"%s\" (%s), but '", "\"%s\" (%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self):", "as modified values, but it was allowed to ' 'be set' % (modified,", "@testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of class methods # ################################### def test__init__(self):", ") def testoid(self): # Tests the oid property of the BaseDataObject class #", "1, 0 ] BadBooleanOrIntEquivalents = [ 'true', '', (1,2), tuple() ] GoodDateTimes =", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid, None,", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._is_active,", "BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as", "error while being tested' ) ################################### # Tests of class properties # ###################################", "True) self.assertEquals(test_object._is_deleted, False) self.assertEquals(test_object._is_dirty, False) self.assertEquals(test_object._is_new, True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # -", "(%s) ' 'was returned instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, 
type(actual).__name__,", "type(actual).__name__, ) ) # - Test all \"bad\" values for is_active in BadBooleanOrIntEquivalents:", "but it was ' 'allowed to be set' % (oid, type(oid).__name__) ) except", "values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail( 'BaseDataObject objects should not accept", "set' % (created, type(created).__name__) ) except (TypeError, ValueError): pass except Exception as error:", "( 'BaseDataObjectDerived has not implemented ' '_update, as required by BaseDataObject' ): self.fail(", "are operating as # expected. # - deleters first test_object = BaseDataObjectDerived() self.assertEquals(test_object._created,", "BaseDataObject class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_active", "module at hms_core.data_objects. \"\"\" ####################################### # Any needed from __future__ imports # #", "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_is_dirty(self): #", "test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None in the ' 'underlying storage attribute, but", "# ####################################### __author__ = '<NAME>' __copyright__ = 'Copyright 2018, all rights reserved' __status__", "BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_active = 'unexpected value' test_object._del_is_active() self.assertEquals( test_object._is_active, True,", "'BaseDataObject._del_modified should leave None in the ' 'underlying storage attribute, but \"%s\" (%s)", "'was returned instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) )", "%s was raised instead:\\n' ' %s' % ( oid, type(oid).__name__, error.__class__.__name__, error )", "but \"%s\" (%s) ' 'was returned instead' % ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__,", "instead' % ( expected, 
type(expected).__name__, actual, type(actual).__name__ ) ) test_object._oid = None self.assertEqual(type(test_object._get_oid()),", "by BaseDataObject' ): self.fail( 'Calling _create should return a known ' 'error-message, but", "' 'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._is_new,", "the created property of the BaseDataObject class # - Assert that the getter", "BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the ' '_get_is_new method as its getter-method'", "type(actual).__name__ ) ) def test_get_is_dirty(self): # Tests the _get_is_dirty method of the BaseDataObject", "'expected error while being tested' ) # - Set things up to force", "# # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core' _testModule = data_objects", "type(is_deleted).__name__, error.__class__.__name__, error ) ) def test_set_is_dirty(self): # Tests the _set_is_dirty method of", "that the getter is correct: self.assertEqual( BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to use", "setter is correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 'BaseDataObject.oid is expected to use the '", "results.runTime = time.time() - testStartTime PrintTestResults(results) if not results.errors and not results.failures: SaveTestReport(results,", "error ) ) def test_set_is_dirty(self): # Tests the _set_is_dirty method of the BaseDataObject", "BaseDataObject._get_modified, 'BaseDataObject.modified is expected to use the ' '_get_modified method as its getter-method'", "' 'was returned instead' % ( created, type(created).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "in GoodDateTimes: if type(modified) == datetime: expected = modified elif type(modified) in (int,", "def test_del_is_new(self): # Tests the _del_is_new method of the BaseDataObject class 
test_object =", "- Assert that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected", "the save method of the BaseDataObject class test_object = BaseDataObjectDerived() # - Set", "self.assertEqual( BaseDataObject.is_new.fdel, BaseDataObject._del_is_new, 'BaseDataObject.is_new is expected to use the ' '_del_is_new method as", "message adjusted # # accordingly: # # - Assert that the setter is", "# Tests the _get_modified method of the BaseDataObject class test_object = BaseDataObjectDerived() expected", "type(test_object._oid).__name__) ) def test_get_created(self): # Tests the _get_created method of the BaseDataObject class", "value' ) def test_get_oid(self): # Tests the _get_oid method of the BaseDataObject class", "test_object.is_active self.assertEqual( actual, expected, 'Setting is_active to \"%s\" (%s) should return ' '\"%s\"", "the ' # '_get_property_name method as its getter-method' # ) # # -", "expected, type(expected).__name__, actual, type(actual).__name__ ) ) def test_get_modified(self): # Tests the _get_modified method", "\"good\" values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active else", "\"%s\" (%s) should return ' '\"%s\" (%s) through the property, but \"%s\" (%s)", "is_dirty value ' 'of \"%s\" (%s), but %s was raised instead:\\n' ' %s'", "if possible. 
# ####################################### ####################################### # Initialization that needs to # # happen", "the oid property of the BaseDataObject class # - Assert that the getter", "(BaseDataObject._del_property_name) should # # be changed to None, and the failure message adjusted", "type(actual).__name__, ) ) # - Test all \"bad\" values for oid in BadOIDs:", "# - Test all \"bad\" values for is_deleted in BadBooleanOrIntEquivalents: try: test_object._set_is_deleted(is_deleted) self.fail(", "True) self.assertEquals(test_object._modified, None) self.assertEquals(test_object._oid, None) # - setters oid = uuid4() created =", "'underlying storage attribute, but \"%s\" (%s) was ' 'found instead' % (test_object._is_new, type(test_object._is_new).__name__)", "accept ' '\"%s\" (%s) as valid is_dirty values, but it ' 'was allowed", "_del_is_new method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value'", ") ) def test_set_oid(self): # Tests the _set_oid method of the BaseDataObject class", "- Test all \"bad\" values for created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject", "= test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected to return \"%s\" (%s), but '", "# - Test all \"good\" values for oid in GoodOIDs: if type(oid) ==", "all \"good\" values for oid in GoodOIDs: if type(oid) == UUID: expected =", "type(test_object._is_dirty).__name__ ) ) def test_del_is_new(self): # Tests the _del_is_new method of the BaseDataObject", "def test_get_modified(self): # Tests the _get_modified method of the BaseDataObject class test_object =", "list to support # # \"from module import member\" use # ####################################### __all__", "unittest from datetime import datetime from uuid import UUID, uuid4 ####################################### # Third-party", "' '_del_created method as its deleter-method' ) def testis_active(self): # 
Tests the is_active", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.oid.fdel, BaseDataObject._del_oid, 'BaseDataObject.oid is expected to", "import * ####################################### # Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents = [ True,", "' 'or ValueError if passed a value of \"%s\" (%s) ' 'as an", "(is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError): pass except Exception as error: self.fail( 'BaseDataObject", "BaseDataObject.modified.fdel, BaseDataObject._del_modified, 'BaseDataObject.modified is expected to use the ' '_del_modified method as its", "a UUID value ' 'if it\\'s retrieved from an instance with an '", "the property, but \"%s\" (%s) ' 'was returned instead' % ( is_deleted, type(is_deleted).__name__,", "'BaseDataObject.property_name is expected to use the ' # '_set_property_name method as its setter-method'", "BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept ' '\"%s\" (%s) as", "accordingly: # # - Assert that the deleter is correct: # self.assertEqual( #", "'found instead' % ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests the", "self.fail( 'BaseDatObject objects should not accept ' '\"%s\" (%s) as a valid oid,", "' '_del_is_dirty method as its deleter-method' ) def testis_new(self): # Tests the is_new", "if is_new else False actual = test_object.is_new self.assertEqual( actual, expected, 'Setting is_new to", "its getter-method' ) # - Assert that the setter is correct: self.assertEqual( BaseDataObject.is_new.fset,", "class methods # ################################### def test__init__(self): # Tests the __init__ method of the", "test_del_is_deleted(self): # Tests the _del_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived()", "correct: self.assertEqual( BaseDataObject.oid.fset, BaseDataObject._set_oid, 
'BaseDataObject.oid is expected to use the ' '_set_oid method", "in BadDateTimes: try: test_object._set_modified(modified) self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s)", "setters oid = uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False", "self.assertEqual( BaseDataObject.is_dirty.fget, BaseDataObject._get_is_dirty, 'BaseDataObject.is_dirty is expected to use the ' '_get_is_dirty method as", "oid = uuid4() created = GoodDateTimes[0] modified = GoodDateTimes[1] is_active = False is_deleted", "datetime.strptime( created, BaseDataObject._data_time_string ) test_object._set_created(created) actual = test_object.created self.assertEqual( actual, expected, 'Setting created", "return BaseDataObject.to_data_dict(self) @classmethod def delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod", "# be changed to None, and the failure message adjusted # # accordingly:", "self.assertEqual( actual, expected, 'Setting is_dirty to \"%s\" (%s) should return ' '\"%s\" (%s)", "BaseDataObject._data_time_string ), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ]", "oid) self.assertEquals(test_object.created, created) self.assertEquals(test_object.is_active, is_active) self.assertEquals(test_object.is_deleted, is_deleted) self.assertEquals(test_object.is_dirty, is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified)", "str: expected = datetime.strptime( modified, BaseDataObject._data_time_string ) test_object._set_modified(modified) actual = test_object.modified self.assertEqual( actual,", "expected = 'expected value' test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual, expected, '_get_is_dirty", "%s was raised instead:\\n' ' %s' % ( modified, type(modified).__name__, error.__class__.__name__, error )", "Tests 
the _get_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "' 'was allowed to be set' % (is_deleted, type(is_deleted).__name__) ) except (TypeError, ValueError):", "def test__init__(self): # Tests the __init__ method of the BaseDataObject class # -", "True if is_deleted else False actual = test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted", "is_deleted to \"%s\" (%s) should return ' '\"%s\" (%s) through the property, but", "test_object.is_deleted self.assertEqual( actual, expected, 'Setting is_deleted to \"%s\" (%s) should return ' '\"%s\"", "% ( is_deleted, type(is_deleted).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "BaseDataObjectDerived() # - Set things up to force a call to _create: test_object._is_new", "= modified elif type(modified) in (int, float): expected = datetime.fromtimestamp(modified) elif type(modified) ==", "test_object._modified, type(test_object._modified).__name__ ) ) def test_del_oid(self): # Tests the _del_oid method of the", "is_dirty) self.assertEquals(test_object.is_new, is_new) self.assertEquals(test_object.modified, modified) def test_del_created(self): # Tests the _del_created method of", "% ( test_object._is_deleted, type(test_object._is_deleted).__name__ ) ) def test_del_is_dirty(self): # Tests the _del_is_dirty method", "(%s) instead' % ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified = None", "correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use the ' '_set_created method", "the BaseDataObject class test_object = BaseDataObjectDerived() test_object._oid = 'unexpected value' test_object._del_oid() self.assertEquals( test_object._oid,", "# Tests the _del_is_deleted method of the BaseDataObject class test_object = BaseDataObjectDerived() test_object._is_deleted", "through the property, but \"%s\" ' 
'(%s) was returned instead.' % ( oid,", "is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use the ' '_set_created", "was raised instead:\\n' ' %s' % ( is_dirty, type(is_dirty).__name__, error.__class__.__name__, error ) )", "# - Assert that the deleter is correct: # self.assertEqual( # BaseDataObject.property_name.fdel, #", "Code-coverage test-case and # # decorator-methods # ####################################### class testdata_objectsCodeCoverage(ModuleCoverageTest): _testNamespace = 'hms_core'", "# - All we need to do here is prove that the various", "is_new in BadBooleanOrIntEquivalents: try: test_object._set_is_new(is_new) self.fail( 'BaseDataObject objects should not accept ' '\"%s\"", "a modified value of ' '\"%s\" (%s), but %s was raised instead:\\n' '", "values for oid in BadOIDs: try: test_object._set_oid(oid) self.fail( 'BaseDatObject objects should not accept", "%s' % ( oid, type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self): # Tests", "BaseDataObject.created.fget, BaseDataObject._get_created, 'BaseDataObject.created is expected to use the ' '_get_created method as its", "Tests the _get_is_active method of the BaseDataObject class test_object = BaseDataObjectDerived() expected =", "values for is_active in GoodBooleanOrIntEquivalents: test_object._set_is_active(is_active) expected = True if is_active else False", "is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty else False actual =", "'_del_created method as its deleter-method' ) def testis_active(self): # Tests the is_active property", "method as its deleter-method' ) def testis_dirty(self): # Tests the is_dirty property of", "'_get_is_deleted was expected to return \"%s\" (%s), but ' 'returned \"%s\" (%s) instead'", "datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes = [ # - invalid types", "use the ' 
'_set_is_deleted method as its setter-method' ) # - Assert that", "the property, but \"%s\" (%s) ' 'was returned instead' % ( is_dirty, type(is_dirty).__name__,", "' 'was returned instead' % ( is_dirty, type(is_dirty).__name__, expected, type(expected).__name__, actual, type(actual).__name__, )", "not accept ' '\"%s\" (%s) as a valid oid, but it was '", "did not raise the ' 'expected error while being tested' ) ################################### #", "BaseDataObjectDerived(BaseDataObject): def __init__(self, oid=None, created=None, modified=None, is_active=None, is_deleted=None, is_dirty=None, is_new=None ): BaseDataObject.__init__( self,", "= expected actual = test_object.modified self.assertEquals(actual, expected, '_get_modified was expected to return \"%s\"", "expected to use the ' '_get_is_active method as its getter-method' ) # -", "for is_active in BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept '", "the _get_created method of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected", "BadBooleanOrIntEquivalents: try: test_object._set_is_active(is_active) self.fail( 'BaseDataObject objects should not accept ' '\"%s\" (%s) as", "sort(cls, objects, sort_by): pass @testdata_objectsCodeCoverage.AddMethodTesting @testdata_objectsCodeCoverage.AddPropertyTesting class testBaseDataObject(unittest.TestCase): ################################### # Tests of class", "test_object._created = expected actual = test_object.created self.assertEquals(actual, expected, '_get_created was expected to return", "self, oid, created, modified, is_active, is_deleted, is_dirty, is_new ) def _create(self): return BaseDataObject._create(self)", "time.time() LocalSuite.run(results) results.runTime = time.time() - testStartTime PrintTestResults(results) if not results.errors and not", "def testsave(self): # Tests the save method of the BaseDataObject class test_object =", "that the setter 
is correct: self.assertEqual( BaseDataObject.created.fset, BaseDataObject._set_created, 'BaseDataObject.created is expected to use", "False, None): test_object._is_dirty = dirty try: test_object.save() except NotImplementedError as error: if str(error)", "= BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty = expected actual = test_object.is_dirty self.assertEquals(actual,", "'_create, as required by BaseDataObject' ): self.fail( 'Calling _create should return a known", "def test_get_oid(self): # Tests the _get_oid method of the BaseDataObject class test_object =", "( is_new, type(is_new).__name__, error.__class__.__name__, error ) ) def test_set_modified(self): # Tests the _set_modified", "- Set things up to force a call to _create: test_object._is_new = True", "% ( is_active, type(is_active).__name__, expected, type(expected).__name__, actual, type(actual).__name__, ) ) # - Test", "property, but \"%s\" (%s) ' 'was returned instead' % ( is_deleted, type(is_deleted).__name__, expected,", "'expected value' test_object._is_active = expected actual = test_object.is_active self.assertEquals(actual, expected, '_get_is_active was expected", "expected, 'Setting is_active to \"%s\" (%s) should return ' '\"%s\" (%s) through the", "'found instead' % ( test_object._created, type(test_object._created).__name__ ) ) def test_del_is_active(self): # Tests the", ") def _create(self): return BaseDataObject._create(self) def _update(self): return BaseDataObject._update(self) def matches(self, **criteria): return", "% ( expected, type(expected).__name__, actual, type(actual).__name__ ) ) test_object._modified = None self.assertEqual(type(test_object._get_modified()), datetime,", "actual = test_object.oid self.assertEqual( actual, expected, 'Setting oid to \"%s\" (%s) should return", "test_set_modified(self): # Tests the _set_modified method of the BaseDataObject class test_object = BaseDataObjectDerived()", "' '_create, as required by BaseDataObject' ): 
self.fail( 'Calling _create should return a", "'BaseDataObject.is_deleted is expected to use the ' '_set_is_deleted method as its setter-method' )", "raised instead:\\n' ' %s' % ( is_deleted, type(is_deleted).__name__, error.__class__.__name__, error ) ) def", "to use the ' '_set_modified method as its setter-method' ) # - Assert", "values for created in BadDateTimes: try: test_object._set_created(created) self.fail( 'BaseDataObject objects should not accept", "not accept ' '\"%s\" (%s) as valid is_active values, but it ' 'was", "- Test all \"good\" values for is_new in GoodBooleanOrIntEquivalents: test_object._set_is_new(is_new) expected = True", "of the BaseDataObject class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_dirty =", "import unittest from datetime import datetime from uuid import UUID, uuid4 ####################################### #", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._is_active = expected actual =", "as created values, but it was allowed to ' 'be set' % (created,", "BaseDataObjectDerived() test_object._is_dirty = 'unexpected value' test_object._del_is_dirty() self.assertEquals( test_object._is_dirty, False, 'BaseDataObject._del_is_dirty should leave None", "= [ # - invalid types (1,2), tuple(), True, False, object(), # -", "test_object = BaseDataObjectDerived() test_object._is_new = 'unexpected value' test_object._del_is_new() self.assertEquals( test_object._is_new, True, 'BaseDataObject._del_is_new should", "Assert that the deleter is correct: self.assertEqual( BaseDataObject.created.fdel, BaseDataObject._del_created, 'BaseDataObject.created is expected to", "error.__class__.__name__, error ) ) def testsave(self): # Tests the save method of the", "Assert that the setter is correct: self.assertEqual( BaseDataObject.is_dirty.fset, BaseDataObject._set_is_dirty, 'BaseDataObject.is_dirty is expected to", "BaseDataObjectDerived() test_object._is_active = 'unexpected 
value' test_object._del_is_active() self.assertEquals( test_object._is_active, True, 'BaseDataObject._del_is_active should leave None", "class test_object = BaseDataObjectDerived() expected = 'expected value' test_object._created = expected actual =", "@classmethod def delete(cls, *oids): pass @classmethod def from_data_dict(cls, data_dict): pass @classmethod def get(cls,", "test_object._modified = expected actual = test_object.modified self.assertEquals(actual, expected, '_get_modified was expected to return", "), datetime.strptime( '3001-01-01 12:34:56', BaseDataObject._data_time_string ), datetime.strptime( '1911-01-01 12:34:56', BaseDataObject._data_time_string ), ] BadDateTimes", "raise TypeError ' 'or ValueError if passed a modified value of ' '\"%s\"", "self.assertEqual( BaseDataObject.is_new.fget, BaseDataObject._get_is_new, 'BaseDataObject.is_new is expected to use the ' '_get_is_new method as", "values for is_dirty in GoodBooleanOrIntEquivalents: test_object._set_is_dirty(is_dirty) expected = True if is_dirty else False", "from hms_core.data_objects import * ####################################### # Constants for test-methods # ####################################### GoodBooleanOrIntEquivalents =", "attribute, but \"%s\" (%s) was ' 'found instead' % ( test_object._modified, type(test_object._modified).__name__ )", "class test_object = BaseDataObjectDerived() # - Test all \"good\" values for is_active in", "% ( oid, type(oid).__name__, error.__class__.__name__, error ) ) def testsave(self): # Tests the", "self.fail( 'BaseDataObject objects should not accept \"%s\" ' '(%s) as created values, but", ") # def testproperty_name(self): # # Tests the property_name property of the BaseDataObject", "python \"\"\" Defines unit-tests for the module at hms_core.data_objects. 
\"\"\" ####################################### # Any", "'true', '', (1,2), tuple() ] GoodDateTimes = [ # - actual datetime values", "is correct: self.assertEqual( BaseDataObject.modified.fset, BaseDataObject._set_modified, 'BaseDataObject.modified is expected to use the ' '_set_modified", ") ) ####################################### # Child-module test-cases to execute # ####################################### # import child_module" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "Example({ input0: [1, 2], input1: [3, 4], output0: [1, 2, 3, 4], }).ExpectFailure()", "2020 The Android Open Source Project # # Licensed under the Apache License,", "<reponame>aosp-goes-brrbrr/packages_modules_NeuralNetworks # # Copyright (C) 2020 The Android Open Source Project # #", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "# # Copyright (C) 2020 The Android Open Source Project # # Licensed", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "License. # You may obtain a copy of the License at # #", "2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis = 4", "= Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1,", "Project # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "Source Project # # Licensed under the Apache License, Version 2.0 (the \"License\");", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "the specific language governing permissions and # limitations under the License. # input0", "compliance with the License. # You may obtain a copy of the License", "Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3, 4], output0: [1,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# limitations under the License. 
# input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1,", "this file except in compliance with the License. # You may obtain a", "the License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "1, 1, 2}\") axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1,", "you may not use this file except in compliance with the License. #", "input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3, 4], output0: [1, 2, 3,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "The Android Open Source Project # # Licensed under the Apache License, Version", "Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis = 4 output0 = Output(\"output0\",", "(C) 2020 The Android Open Source Project # # Licensed under the Apache", "\"{1, 1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0:", "ANY KIND, either express or implied. # See the License for the specific", "= Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3, 4], output0:", "= Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis = 4 output0 =", "permissions and # limitations under the License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1,", "axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3, 4], output0: [1, 2, 3, 4],", "and # limitations under the License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1,", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3,", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis = 4 output0", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "# input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1 = Input(\"input1\",", "under the License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\")", "= 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model =", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "1, 2}\") axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "Copyright (C) 2020 The Android Open Source Project # # Licensed under the", "input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\",", "axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model", "OF ANY KIND, either express or implied. 
# See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", "License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1 =", "4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\",", "# you may not use this file except in compliance with the License.", "Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\")", "for the specific language governing permissions and # limitations under the License. #", "agreed to in writing, software # distributed under the License is distributed on", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1:", "# Copyright (C) 2020 The Android Open Source Project # # Licensed under", "1, 1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\")", "(the \"License\"); # you may not use this file except in compliance with", "1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis", "model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3, 4],", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "language governing permissions and # limitations under the License. # input0 = Input(\"input0\",", "except in compliance with the License. 
# You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "2}\") axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\")", "\"{1, 1, 1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1,", "Open Source Project # # Licensed under the Apache License, Version 2.0 (the", "1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "\"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1,", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2],", "specific language governing permissions and # limitations under the License. # input0 =", "file except in compliance with the License. 
# You may obtain a copy", "Android Open Source Project # # Licensed under the Apache License, Version 2.0", "1, 1, 1, 2}\") axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1,", "\"{1, 1, 1, 1, 2}\") axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis =", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the License. # You may obtain a copy of the License at #", "\"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") axis = 4 output0 = Output(\"output0\", \"TENSOR_FLOAT32\",", "limitations under the License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1,", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "\"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. 
# See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1, 1,", "or agreed to in writing, software # distributed under the License is distributed", "input0, input1, axis).To(output0).IntroducedIn(\"V1_0\") Example({ input0: [1, 2], input1: [3, 4], output0: [1, 2,", "or implied. # See the License for the specific language governing permissions and", "= Input(\"input0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 2}\") input1 = Input(\"input1\", \"TENSOR_FLOAT32\", \"{1,", "governing permissions and # limitations under the License. # input0 = Input(\"input0\", \"TENSOR_FLOAT32\",", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "output0 = Output(\"output0\", \"TENSOR_FLOAT32\", \"{1, 1, 1, 1, 4}\") model = Model().Operation(\"CONCATENATION\", input0," ]
[ "= config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) #", ":param config_data: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path =", "logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect scores df_grid_scores = [] for m", "# type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes)", "import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def load_data(path):", "Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering,", "below. with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str, Dict] # -------------", "Logger): \"\"\" To be used for hyperparameter optimization of the mention pair classifier", "as is, # because our X's are not matrix-like. So we run our", "to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected", "instantiate scorer which fits the pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else:", "of features # producing random and zero values. 
For 1e3 instances, values between", "for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) # assert that", "MAX_CORES from python.util.config import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import", "\"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use for", ":param trial: optuna trial :return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance',", "prediction analysis for each coref link type and prediction examples if not is_clustering_pipeline", "cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring,", "_KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1,", "\"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) ->", "logger.info(\"Saving pipelines to disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p", "ValueError(\"To optimize the mention pair classifier, the 'classifier' config parameter must be the", "oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model,", "= serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the pipeline if", "# labels into the 
mention pair classifier, and feed that to RFECV. To", "classifier at the end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper", "directory containing serialized models and scorers :param config_data: :param config_evaluate: :param config_global: :param", "p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last pipeline step.\") #", "required instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree", "To be used for hyperparameter optimization of the mention pair classifier and agglomerative", "is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as", "we have very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support", "\"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int", "RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util", "and fall back to leave-one-out (here: one instance = one partition) # if", "_KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] }", "is_clustering_pipeline = False # collect mention pair scorer parameters if not \"pairs\" in", "base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = 
get_feature_extractors_config_with_all_and_defaults() def", "conducted for 1e4 and 1e5 instances. We interpolate between these data points. num_instances", "return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR:", "logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics:", "{}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def", "from joblib import dump, delayed, Parallel, load from optuna import Trial from optuna.samplers", "section for all feature extractors with default values. :return: \"\"\" return { LEMMA_EXTR:", "form of a dictionary.\") # ------------- create base config to more or less", "type(classifier) is dict): raise ValueError(\"To optimize the mention pair classifier, the 'classifier' config", "serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the pipeline if is_clustering_pipeline:", "10 runs of 5-fold cross-validation for recursive feature elimination with a Random Forest", "pickle import pprint from logging import Logger from pathlib import Path from typing", "pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\"", "with_clustering: assert type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else:", "metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics to disk metrics.to_csv(serialization_dir / 
\"metrics_unaggregated.csv\", index=True)", "config_global: :param logger: :return: \"\"\" # During the hyperparameter optimization, use a fixed", "== \".pipeline.joblib\": pipelines[i] = load(p) # find out if we are dealing with", "# collect scores df_grid_scores = [] for m in grid_scores: # number of", "RFECV needs X to be an matrix-like of shape (n_samples, n_features). This means", "support = selector.support_ grid_scores = selector.grid_scores_ assert len(support) == len(feature_names) return feature_names, support,", "type: Union[str, Dict] # ------------- validate parameters --------------- if not with_clustering and (classifier", "chop up the pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring =", "= classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending on", "assert len(set(len(s) for s in supports)) == 1 assert len(set(get_dict_hash(fn) for fn in", "assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1 # collect selections in DataFrame", "\"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {},", "document clustering ({len(hard_document_clusters)} clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could", "RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper", "from preliminary feature selection are used. 
# None means \"use all features\", an", "else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna", "me why it needs to be stored as a string, using the dict", "make 6 splits at most and fall back to leave-one-out (here: one instance", "import get_dict_hash def load_data(path): # load preprocessed dataset from file with open(path, \"rb\")", "is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is", "these data points. num_instances = len(actual_y) xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015,", "with clustering parameters. :param trial: optuna trial :return: config dictionary \"\"\" cluster_criterion =", "the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et al.", "= Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction analysis", "with open(path, \"rb\") as f: data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial,", "the pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir)", "metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return metrics, outcomes #", "XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import", "<filename>python/handwritten_baseline/pipeline/model/scripts/train_predict_optimize.py import copy import json import pickle import pprint from logging import Logger", "rate \"min_child_weight\": 
trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance weight at a child", "{}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {}", "Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import", "PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix and labels conflated_X = pipeline.fit_transform(X,", "in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber results in a quadratically", "scenario. Depending on those parameters, evaluation results are not representative. I hope you", "during instantiation, so we need to deepcopy the originals to not lose them", "pipeline as is, # because our X's are not matrix-like. 
So we run", "the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf", "set min_impurity_decrease depending on the number of instances to obtain a useful feature", "= True # if present, inject hard document clusters into the last pipeline", "feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to disk\") model_dir = serialization_dir /", "best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\"", ":param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"]", "= [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write", "inplace=True) # write metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\",", "\"validation_fraction\": 0.1}} else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\"", "str) -> Dict: \"\"\" Uses optuna to sample a config with classifier hyperparameters.", "recursive feature elimination with a Random Forest mention classifier to find the most", "elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError # alpha range", "trial so that we can retrieve it later and use it to instantiate", "feature selection result. 
# min_impurity_decrease was determined based on a series of manual", "must be the name of the classifier to optimize.\") if with_clustering and (type(classifier)", "non-garbage # features). Similar experiments were conducted for 1e4 and 1e5 instances. We", "prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"]", "enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global:", ":param classifier_name: The classifier to use (and sample hyperparameters for). Testing them separately", "--------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"]", "sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\" Uses optuna to sample a config", "\"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if not cluster_criterion", "specified for a mention pair scoring scenario. Depending on those parameters, evaluation results", "-> None: \"\"\" Trains n classifier+clustering pipelines with a given configuration. 
:param config_data:", "# max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction required", "= np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\",", "be of the same type (mention pair classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0]", "classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names)", "'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\"", "'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None:", "elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if present, inject hard document", "classifiers only: detailed prediction analysis for each coref link type and prediction examples", "on the number of instances to obtain a useful feature selection result. 
#", "config = copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier)", "each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors", "_KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we use early stopping, so this is", "underscores and pick the last value to obtain the document id hard_document_clusters =", "name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\"))", "write scoring outputs into separate folder for each model i_serialization_dir = serialization_dir /", "in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) # assert that all results", "optimal expected result (i.e. significant peak around the number of non-garbage # features).", "pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\" Uses optuna", "are optimized. If True, hyperparameters for clustering # are optimized. 
The latter case", "config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None,", "raise ValueError(\"To optimize the clustering step, the 'classifier' config parameter must be a", "that all results are compatible assert len(set(len(s) for s in supports)) == 1", "config dictionaries are modified during instantiation, so we need to deepcopy the originals", "mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\" in", "\"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\":", "preliminary feature selection are used. # None means \"use all features\", an empty", "# big influence on the results. We therefore make sure run multiple RFECV", "SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\", "delta step we allow each leaf output to be. 
Reported to help with", "end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is", "# modified_huber results in a quadratically smoothed SVM with gamma = 2 loss", "LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model:", "Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to disk\")", "scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if", "to disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines):", "if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) # find out if we are", "optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config } # -------------", "# Maximum delta step we allow each leaf output to be. Reported to", "to make a further partition on a leaf node of the tree. \"max_delta_step\":", "classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber results in a", "cluster in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\") for p in", "(i.e. significant peak around the number of non-garbage # features). Similar experiments were", "so that we can retrieve it later and use it to instantiate the", "Testing them separately seems to make more sense to me. 
:return: classifier config", "optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation", ":return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR:", "1000, # we use early stopping, so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\",", "import pandas as pd import seaborn as sns from joblib import dump, delayed,", "splits at most and fall back to leave-one-out (here: one instance = one", "is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et al. system, so we", "list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction analysis for each coref link type", "p in pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must be of", "\"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\":", "parameter must be the name of the classifier to optimize.\") if with_clustering and", "for p in pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must be", "config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i,", "generation and aggregate those. 
results = [] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names,", "scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y)", "to use (and sample hyperparameters for). Testing them separately seems to make more", "feature selection are used. # None means \"use all features\", an empty list", "Dict, config_global: Dict, logger: Logger) -> None: \"\"\" Trains n classifier+clustering pipelines with", "help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\":", "# for classifiers only: detailed prediction analysis for each coref link type and", "% {cv_n_jobs} CPUs != 0)\") def objective(trial: Trial): # config dictionaries are modified", "depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction required to make a", "a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not", "= i return metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\")", "\"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config = {_TYPE:", "import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def load_data(path): # load preprocessed dataset", "config_global: :param logger: :return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading", "feature selection on the EVALUATION split. 
Uses 10 runs of 5-fold cross-validation for", "document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster in hard_document_clusters]", "random seeds for # the mention pair generation and aggregate those. results =", "= pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg", "were conducted for 1e4 and 1e5 instances. We interpolate between these data points.", "Similar experiments were conducted for 1e4 and 1e5 instances. We interpolate between these", "RFECV. To do that, we need to chop up the pipeline. config =", "on those parameters, evaluation results are not representative. I hope you know what", "useful features. :param config_data: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR]", "results. We therefore make sure run multiple RFECV iterations with different random seeds", "ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global:", "identify last pipeline step.\") # load and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning", "= np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease)", "selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write to", "as pd import seaborn as sns from joblib import dump, delayed, Parallel, load", "train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if 
base_pipeline_config[\"features\"][\"extractors\"]", "def load_data(path): # load preprocessed dataset from file with open(path, \"rb\") as f:", "classifier = config_model[\"classifier\"] # type: Union[str, Dict] # ------------- validate parameters --------------- if", "/ f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global: Dict, logger: Logger)", "mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir", "determined subset of all mention pairs is used. This has a # big", "{_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True,", "1 assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1 # collect selections in", "\"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber results", "pair classifier, the 'classifier' config parameter must be the name of the classifier", "0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as", "last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if present, inject hard document clusters", "instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits,", "= pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write 
to file(s)", "instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring", "Path, config_data: Dict, config_evaluate: Dict, config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts", "jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing", "} def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\"", "> 1: raise ValueError(\"All pipelines must be of the same type (mention pair", "the directory containing serialized models and scorers :param config_data: :param config_evaluate: :param config_global:", "config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config =", "the tree. 
\"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step we allow each", "i - 1) * last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\",", "load(p) # find out if we are dealing with mention pair classification or", "Logger from pathlib import Path from typing import Dict, Optional, List, Union, Tuple", "python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config", "from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline from tabulate import", "Union[str, Dict] # ------------- validate parameters --------------- if not with_clustering and (classifier is", "train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits at most and fall", "0.0015 # produced plots closest to the optimal expected result (i.e. significant peak", "return feature_names, support, grid_scores # When using oracle mention pair generation, a randomly", "dictionary with clustering parameters. :param trial: optuna trial :return: config dictionary \"\"\" cluster_criterion", "from python.util.util import get_dict_hash def load_data(path): # load preprocessed dataset from file with", "config dictionary with clustering parameters. 
:param trial: optuna trial :return: config dictionary \"\"\"", "hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters =", "1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\"))", "metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from disk.\")", "used. # None means \"use all features\", an empty list means no features", "SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS,", "hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\")", "}, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name ==", "group_by = [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[])", "\"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step we allow each leaf output", "from logging import Logger from pathlib import Path from typing import Dict, Optional,", "import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline from tabulate import tabulate from", "if hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert 
hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with", "last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All", "= selections.loc[selections.mean(axis=1) > 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir", "ValueError(\"All pipelines must be of the same type (mention pair classification or clustering)\")", "= {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\":", "= [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature", "Optuna sampling, CV splits and classifier. optimization_random_seed = 0 # If False, hyperparameters", "random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove", "name of the classifier to optimize.\") if with_clustering and (type(classifier) is str or", "1e-3, 1e2), # Maximum delta step we allow each leaf output to be.", "1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise", "plot feature selection results plot_destination = serialization_dir / \"rfecv_plot.png\" ax = sns.lineplot(x=\"num-features\", y=\"weighted-f1\",", "# features). Similar experiments were conducted for 1e4 and 1e5 instances. 
We interpolate", "child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0),", "to optimize.\") if with_clustering and (type(classifier) is str or not classifier): raise ValueError(\"To", "# type: Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\":", "else: raise ValueError(\"Could not identify last pipeline step.\") # load and prepare data", "doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring outputs into separate folder for", "the dict object did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and", "pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\")", "hard_document_clusters = pickle.load(f) # the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to", "the results. 
We therefore make sure run multiple RFECV iterations with different random", "prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std", "for classifiers only: detailed prediction analysis for each coref link type and prediction", "last pipeline stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not", "f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect scores df_grid_scores = []", "\"selected_features\": selected_features }, \"pairs\": pairs_config } # ------------- get going with optimization now", "\"verbose\": False}} elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size =", "pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir: path to the directory containing serialized", "\"\"\" # During the hyperparameter optimization, use a fixed random seed for the", "the clustering step, the 'classifier' config parameter must be a complete classifier configuration", "of the mention pair classifier and agglomerative clustering. :param config_data: :param config_model: :param", "_FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\", "# collect mention pair scorer parameters if not \"pairs\" in config_data: raise ValueError(\"Scoring", "matrix + # labels into the mention pair classifier, and feed that to", "data points. 
num_instances = len(actual_y) xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025])", "\"grid_scores.csv\")) # plot feature selection results plot_destination = serialization_dir / \"rfecv_plot.png\" ax =", ":return: classifier config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER:", "must be a complete classifier configuration in the form of a dictionary.\") #", "objective(trial: Trial): # config dictionaries are modified during instantiation, so we need to", "in pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must be of the", "# Minimum loss reduction required to make a further partition on a leaf", "0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use for imbalanced datasets", "TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data:", "model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p, model_dir", "# for cross-validation, make 6 splits at most and fall back to leave-one-out", "SVM with gamma = 2 loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss", "\"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction required to make a further", "range follows the suggestions of the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS:", "are modified during instantiation, so we need to deepcopy the originals to not", "Dict, config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir:", "config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber", "the same type (mention pair classification or clustering)\") last_pipeline_step_name = 
list(last_pipeline_step_names)[0] # prepare", "if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified for a mention pair scoring", "optimized. The latter case needs a full classifier configuration, see below. with_clustering =", "multiple RFECV iterations with different random seeds for # the mention pair generation", "\"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir /", "eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring outputs into separate folder", "last pipeline step.\") # load and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning =", "pipeline up to the point where we input the feature matrix + #", "= \"log\" else: raise ValueError # alpha range follows the suggestions of the", "has a # big influence on the results. We therefore make sure run", "Dict: \"\"\" Returns config section for all feature extractors with default values. 
:return:", "logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines =", "metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\" Runs feature", "type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier)", "RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we have very imbalanced data", "optimize the clustering step, the 'classifier' config parameter must be a complete classifier", "\"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path)", "np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector =", "eval_X, eval_y) metrics[\"model\"] = i return metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating", "on the results. We therefore make sure run multiple RFECV iterations with different", "1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\":", "config in the trial so that we can retrieve it later and use", "less use in each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors", "scores df_grid_scores = [] for m in grid_scores: # number of features and", "Trains n classifier+clustering pipelines with a given configuration. 
:param config_data: :param config_model: :param", "be. Reported to help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\",", "as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect scores df_grid_scores =", "from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring", "to the point where we input the feature matrix + # labels into", "= instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the classifier at", "f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv,", "copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir /", "import json import pickle import pprint from logging import Logger from pathlib import", "not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str", "where we input the feature matrix + # labels into the mention pair", "!= 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats}", "= 6) -> Tuple[List[str], np.array, np.array]: # RFECV needs X to be an", "\"adam\", 
\"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5,", "stopping, so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate", "{}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict,", "Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\":", "or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names) >", "Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) #", "the pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, config,", "pipelines[i] = load(p) # find out if we are dealing with mention pair", "import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR,", "\"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\":", "= {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we use early stopping,", "# ------------- create base config to more or less use in each optimization", "df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature 
selection results plot_destination", "optimize.\") if with_clustering and (type(classifier) is str or not classifier): raise ValueError(\"To optimize", "= config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y", "a config with classifier hyperparameters. :param trial: Optuna trial :param classifier_name: The classifier", "tested, and 0.0015 # produced plots closest to the optimal expected result (i.e.", "value to obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster}", "for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines,", "selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \"", "data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y =", "them config = copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not str config[\"classifier\"] =", "= config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): #", "== CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention pair scorer parameters if not", "series of manual experiments with a varying number of features # producing random", "fits the pipeline if is_clustering_pipeline: scorer = 
CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config,", "supports, grid_scores = list(zip(*results)) # assert that all results are compatible assert len(set(len(s)", "store the config in the trial so that we can retrieve it later", "Maximum delta step we allow each leaf output to be. Reported to help", "get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline,", "str or not classifier): raise ValueError(\"To optimize the clustering step, the 'classifier' config", "\"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}}", "1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance weight", "str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the config in the trial so", "classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\",", "classifier configuration in the form of a dictionary.\") # ------------- create base config", "= 2 loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else:", "[] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) # assert", "LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION,", "assert hard_document_clusters_file.exists() and 
hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) # the", "json import pickle import pprint from logging import Logger from pathlib import Path", "gamma = 2 loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\"", "is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir)", "(serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect", "configuration, see below. with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str, Dict]", "Minimum loss reduction required to make a further partition on a leaf node", "result (i.e. significant peak around the number of non-garbage # features). Similar experiments", "classifier_name == SVC_HUBER: # modified_huber results in a quadratically smoothed SVM with gamma", "f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger,", "hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) # the format in the pickle file", "num_instances = len(actual_y) xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease =", "pairs is used. This has a # big influence on the results. We", "make a further partition on a leaf node of the tree. 
\"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\",", "def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section for all feature extractors with", "- 1) * last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS:", "in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline", "= RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we have very imbalanced", "needs X to be an matrix-like of shape (n_samples, n_features). This means we", "str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the pipeline if is_clustering_pipeline: scorer =", "leave-one-out (here: one instance = one partition) # if there are few partitions", "a dictionary.\") # ------------- create base config to more or less use in", "{_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1),", "extracted features so that only those from preliminary feature selection are used. #", "using the dict object did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline", "config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified for a mention pair", "remove the classifier at the end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] #", "useful feature selection result. 
# min_impurity_decrease was determined based on a series of", "logger: Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir: path to the", "+ \"\\n\".join(selected_features)) # collect scores df_grid_scores = [] for m in grid_scores: #", "config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file()", "pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain", "if present, inject hard document clusters into the last pipeline stage (the clustering", "** (num_hidden_layers - i - 1) * last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config", "stored as a string, using the dict object did not work trial.set_user_attr(\"config\", json.dumps(config))", "a useful feature selection result. 
# min_impurity_decrease was determined based on a series", "dictionary.\") # ------------- create base config to more or less use in each", "def run_rfecv_iteration(random_seed: int, n_splits: int = 6) -> Tuple[List[str], np.array, np.array]: # RFECV", "if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\",", "WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from", "few partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if", "the hyperparameter optimization, use a fixed random seed for the Optuna sampling, CV", "pipelines must be of the same type (mention pair classification or clustering)\") last_pipeline_step_name", "import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline from", "n_splits: int = 6) -> Tuple[List[str], np.array, np.array]: # RFECV needs X to", "return mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"]))", "config_training: Dict, config_global: Dict, logger: Logger) -> None: \"\"\" Trains n classifier+clustering pipelines", "parameters, evaluation results are not representative. I hope you know what you're doing.\")", "TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS,", "around the number of non-garbage # features). 
Similar experiments were conducted for 1e4", ":param config_training: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train =", "was determined based on a series of manual experiments with a varying number", "return pipeline # train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs =", "\"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) ->", "import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER,", "use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv", "trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers", "# assert that all results are compatible assert len(set(len(s) for s in supports))", "allow each leaf output to be. Reported to help with imbalanced data. 
\"subsample\":", "we use early stopping, so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0),", "= config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation =", "CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import", "parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in", "not \"pairs\" in config_data: raise ValueError(\"Scoring mention pairs requires a 'pairs' config.\") config_pairs", "1, \"n_estimators\": 1000, # we use early stopping, so this is the maximum", "\"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1,", "Returns config section for all feature extractors with default values. :return: \"\"\" return", "len(actual_y) xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp,", "an empty list means no features at all! 
selected_features = config_model[\"features\"].get(\"selected_features\", None) #", "selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we have very", "and (type(classifier) is str or not classifier): raise ValueError(\"To optimize the clustering step,", "most and fall back to leave-one-out (here: one instance = one partition) #", "instance = one partition) # if there are few partitions cv_num_splits = min(6,", "scorers :param config_data: :param config_evaluate: :param config_global: :param logger: :return: metrics Dataframe \"\"\"", "each model i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits", "in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et", "if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config", "pairs requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config", "= metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics to disk", "50) hidden_layer_sizes = [2 ** (num_hidden_layers - i - 1) * last_hidden_layer_size for", "get going with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning", "Random Forest mention classifier to find the most useful features. :param config_data: :param", "clustering # are optimized. 
The latter case needs a full classifier configuration, see", "elimination with a Random Forest mention classifier to find the most useful features.", "{_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we use early stopping, so", "classifier_name: str) -> Dict: \"\"\" Uses optuna to sample a config with classifier", "serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\")", "0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats", "df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature", "== len(feature_names) return feature_names, support, grid_scores # When using oracle mention pair generation,", "for fn in feature_names)) == 1 # collect selections in DataFrame selections =", "pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False,", "# We set min_impurity_decrease depending on the number of instances to obtain a", "CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y)", "only those from preliminary feature selection are used. # None means \"use all", "data. 
\"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0),", "config_model: :param config_training: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train", "config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for", "assert type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert", "definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\":", "logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best config", "\"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir:", "selection on the EVALUATION split. Uses 10 runs of 5-fold cross-validation for recursive", "the classifier at the end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type:", "stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file", "of all mention pairs is used. 
This has a # big influence on", "= config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y", "that to RFECV. To do that, we need to chop up the pipeline.", "separate folder for each model i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate", "Logger) -> None: \"\"\" Trains n classifier+clustering pipelines with a given configuration. :param", "num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by =", "\"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None },", "'distance', 'maxclust']) cluster_depth = 0 if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1,", "random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix and labels conflated_X = pipeline.fit_transform(X, y)", "1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use for imbalanced datasets (which", "logger: Logger): \"\"\" Runs feature selection on the EVALUATION split. 
Uses 10 runs", "scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv", "if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes,", "the 'classifier' config parameter must be a complete classifier configuration in the form", "train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"]", "perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline:", "load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE:", "Dict, config_training: Dict, config_global: Dict, logger: Logger) -> None: \"\"\" Trains n classifier+clustering", "({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\") def objective(trial: Trial):", "[] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed)", "= KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending on the number of", "\"logloss\", 
\"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1,", "/ \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict, config_training: Dict, config_global: Dict,", "low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average',", "\"\"\" To be used for hyperparameter optimization of the mention pair classifier and", "\"\"\" Predicts and evaluates :param model_serialization_dir: path to the directory containing serialized models", "number of features and CV-score for that number of features x_and_y = np.vstack([np.arange(1,", "be used for hyperparameter optimization of the mention pair classifier and agglomerative clustering.", "no features at all! selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config =", "of the classifier to optimize.\") if with_clustering and (type(classifier) is str or not", "get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}},", "feature extractors with default values. 
:return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {},", "df_grid_scores = [] for m in grid_scores: # number of features and CV-score", "TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE,", "f: data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict:", "use f1_weighted because we have very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names", "for i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\",", "{cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\") def objective(trial: Trial): # config dictionaries", "from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts", "repeats % {cv_n_jobs} CPUs != 0)\") def objective(trial: Trial): # config dictionaries are", "logger) logger.info(\"Saving pipelines to disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i,", "for all feature extractors with default values. 
:return: \"\"\" return { LEMMA_EXTR: {},", "copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier)", "import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR", "MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from python.util.optuna import", "ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP", "pair classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()} if", "scorer parameters if not \"pairs\" in config_data: raise ValueError(\"Scoring mention pairs requires a", "copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir /", "= {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median',", "there are few partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs =", "def predict_and_evaluate(i, pipeline): # write scoring outputs into separate folder for each model", "analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from 
python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from", "on underscores and pick the last value to obtain the document id hard_document_clusters", "config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base =", "\"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError # alpha", "CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if present, inject hard document clusters into the", "in grid_scores: # number of features and CV-score for that number of features", "pair generation, a randomly determined subset of all mention pairs is used. This", "Dict, logger: Logger): \"\"\" To be used for hyperparameter optimization of the mention", "Trial) -> Dict: \"\"\" Uses optuna to sample a config dictionary with clustering", "config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to", "raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna to", "full classifier configuration, see below. with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type:", "hyperparameters. 
:param trial: Optuna trial :param classifier_name: The classifier to use (and sample", "\".pipeline.joblib\": pipelines[i] = load(p) # find out if we are dealing with mention", "= [] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) #", "n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 =", "train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits at", "/ \"grid_scores.csv\")) # plot feature selection results plot_destination = serialization_dir / \"rfecv_plot.png\" ax", "Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction analysis for", "parameter choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs", "model_serialization_dir: path to the directory containing serialized models and scorers :param config_data: :param", "know what you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if", "verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_", "pair classification are optimized. If True, hyperparameters for clustering # are optimized. 
The", "5-fold cross-validation for recursive feature elimination with a Random Forest mention classifier to", "at the end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert", "= classifier_wrapper.classifier_ # obtain feature matrix and labels conflated_X = pipeline.fit_transform(X, y) actual_X,", "out if we are dealing with mention pair classification or clustering pipelines last_pipeline_step_names", "i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for", "pipeline step.\") # load and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"]", "'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def", "pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features", "to use for imbalanced datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10),", "num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation", "model - # don't ask me why it needs to be stored as", "Dict, logger: Logger) -> None: \"\"\" Trains n classifier+clustering pipelines with a given", "{} } def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger):", "dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global: Dict,", "with 
imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\",", "\"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"])", "mention pair generation, a randomly determined subset of all mention pairs is used.", "\"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features))", "trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min required", "-> Tuple[List[str], np.array, np.array]: # RFECV needs X to be an matrix-like of", "min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES],", "hyperparameters for clustering # are optimized. The latter case needs a full classifier", "to me. :return: classifier config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name", "sample a config dictionary with clustering parameters. 
:param trial: optuna trial :return: config", "config_model[\"classifier\"] # type: Union[str, Dict] # ------------- validate parameters --------------- if not with_clustering", "logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir /", "serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return metrics, outcomes", "selections.loc[selections.mean(axis=1) > 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir /", "best_config_file) def train(config_data: Dict, config_model: Dict, config_training: Dict, config_global: Dict, logger: Logger) ->", "\\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline,", "# alpha range follows the suggestions of the sklearn documentation classifier_config = {_TYPE:", "dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if not", "import TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from", "else: assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the config", "used for hyperparameter optimization of the mention pair classifier and agglomerative clustering. 
:param", "assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix and labels", "= json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write", "0 # If False, hyperparameters for mention pair classification are optimized. If True,", "KFold from sklearn.pipeline import Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\", "Optional, List, Union, Tuple import numpy as np import optuna import pandas as", "\"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global:", "run multiple RFECV iterations with different random seeds for # the mention pair", "from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def load_data(path): # load", "follows the suggestions of the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\":", "- # don't ask me why it needs to be stored as a", "use it to instantiate the best model - # don't ask me why", "matrix-like. So we run our pipeline up to the point where we input", "optimization of the mention pair classifier and agglomerative clustering. :param config_data: :param config_model:", "our pipeline as is, # because our X's are not matrix-like. 
So we", "from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance", "# type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature", "0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to", "== 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\":", "grid_scores = list(zip(*results)) # assert that all results are compatible assert len(set(len(s) for", "and prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant =", "= np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector", "(mention pair classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name", "load from optuna import Trial from optuna.samplers import TPESampler from sklearn.feature_selection import RFECV", "/ \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict,", "conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True)", "producing random and zero values. 
For 1e3 instances, values between 1e-7 and 1e-1", "= sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store", "if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study", "collect mention pair scorer parameters if not \"pairs\" in config_data: raise ValueError(\"Scoring mention", "expected result (i.e. significant peak around the number of non-garbage # features). Similar", "[{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard document clustering", "and aggregate those. results = [] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports,", "points. num_instances = len(actual_y) xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease", "ValueError(\"Could not identify last pipeline step.\") # load and prepare data eval_data =", "selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values", "len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must be of the same type (mention", "classifier hyperparameters. :param trial: Optuna trial :param classifier_name: The classifier to use (and", "10), # min required instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12),", "from typing import Dict, Optional, List, Union, Tuple import numpy as np import", "separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes =", "configuration. 
:param config_data: :param config_model: :param config_training: :param config_global: :param logger: :return: \"\"\"", "{} # type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if", "Depending on those parameters, evaluation results are not representative. I hope you know", "base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config =", "# recommended to use for imbalanced datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\",", "X to be an matrix-like of shape (n_samples, n_features). This means we cannot", "= optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks)", "loss reduction required to make a further partition on a leaf node of", "metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction", "in the form of a dictionary.\") # ------------- create base config to more", "between these data points. num_instances = len(actual_y) xp = np.log10([1e3, 1e5]) fp =", "use (and sample hyperparameters for). Testing them separately seems to make more sense", "prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention pair", "= {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config }", "values. 
For 1e3 instances, values between 1e-7 and 1e-1 were tested, and 0.0015", "of the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\",", "{p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must", "# write metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True)", "------------- validate parameters --------------- if not with_clustering and (classifier is None or type(classifier)", "random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean()", "manual experiments with a varying number of features # producing random and zero", "}, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int = 6) -> Tuple[List[str],", "0.1, \"verbose\": False}} elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size", "string, using the dict object did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature", "for # the mention pair generation and aggregate those. results = [] for", "obtain feature matrix and labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X)", "import copy import json import pickle import pprint from logging import Logger from", "sure run multiple RFECV iterations with different random seeds for # the mention", "(n_samples, n_features). This means we cannot use our pipeline as is, # because", "further partition on a leaf node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2),", "to the optimal expected result (i.e. 
significant peak around the number of non-garbage", "work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and classifier, transform the features pipeline,", "actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending", "metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with", "tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True,", "all features\", an empty list means no features at all! selected_features = config_model[\"features\"].get(\"selected_features\",", "sense to me. :return: classifier config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if", "import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from", "each leaf output to be. Reported to help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\",", "\"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config } # ------------- get going with", "xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", #", "the most useful features. 
:param config_data: :param config_global: :param logger: :return: \"\"\" serialization_dir", "XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we use", "for mention pair classification are optimized. If True, hyperparameters for clustering # are", "pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last pipeline step.\") # load and", "logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats %", "1.0), # recommended to use for imbalanced datasets (which we definitely have) \"scale_pos_weight\":", "\"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\",", "}, \"pairs\": pairs_config } # ------------- get going with optimization now --------------- serialization_dir", "timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial))", "a given configuration. 
:param config_data: :param config_model: :param config_training: :param config_global: :param logger:", "return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section for all feature", "/ \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials", "Predicts and evaluates :param model_serialization_dir: path to the directory containing serialized models and", "number of features x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores =", "data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\":", "1: raise ValueError(\"All pipelines must be of the same type (mention pair classification", "the name of the classifier to optimize.\") if with_clustering and (type(classifier) is str", "selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with", "np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running", "in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only:", "if not \"pairs\" in config_data: raise ValueError(\"Scoring mention pairs requires a 'pairs' config.\")", "type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) ==", ":param config_global: :param logger: :return: 
\"\"\" # During the hyperparameter optimization, use a", "with mention pair classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p in", "if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must be of the same type", "in supports)) == 1 assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1 #", "= config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y", "fixed random seed for the Optuna sampling, CV splits and classifier. optimization_random_seed =", "the last pipeline stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is", "pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False,", "eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger,", "a # big influence on the results. 
We therefore make sure run multiple", "\"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True,", "system, so we split on underscores and pick the last value to obtain", "scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the classifier", "config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\")", "get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring outputs into separate", "direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial =", "hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard", "optimization, use a fixed random seed for the Optuna sampling, CV splits and", "type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the config in the", "= one partition) # if there are few partitions cv_num_splits = min(6, len(train_X))", "with a given configuration. 
:param config_data: :param config_model: :param config_training: :param config_global: :param", "+ # labels into the mention pair classifier, and feed that to RFECV.", "is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted features so", "tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return", "instantiate feature pipeline and classifier, transform the features pipeline, scoring = instantiate_pipeline(logger, config,", "based on a series of manual experiments with a varying number of features", "cannot use our pipeline as is, # because our X's are not matrix-like.", "# use f1_weighted because we have very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\")", "optuna import Trial from optuna.samplers import TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection", "= [{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard document", "-> pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir: path to the directory containing", "and feed that to RFECV. To do that, we need to chop up", "results plot_destination = serialization_dir / \"rfecv_plot.png\" ax = sns.lineplot(x=\"num-features\", y=\"weighted-f1\", data=df_grid_scores) fig =", "**config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds =", "Runs feature selection on the EVALUATION split. Uses 10 runs of 5-fold cross-validation", "sampling, CV splits and classifier. 
optimization_random_seed = 0 # If False, hyperparameters for", "not lose them config = copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not str", "trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended", "is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the config in the trial", "dict): raise ValueError(\"To optimize the mention pair classifier, the 'classifier' config parameter must", "instances, values between 1e-7 and 1e-1 were tested, and 0.0015 # produced plots", "1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError return", "and scorers :param config_data: :param config_evaluate: :param config_global: :param logger: :return: metrics Dataframe", "of non-garbage # features). Similar experiments were conducted for 1e4 and 1e5 instances.", "\"metric\"] else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1,", "classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME:", "pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes))", "config_training: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"]", "compatible assert len(set(len(s) for s in supports)) == 1 assert len(set(get_dict_hash(fn) for fn", "elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) 
last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5,", "else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single',", "eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring", "serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning", "min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by =", "if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient", "write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features))", "path to the directory containing serialized models and scorers :param config_data: :param config_evaluate:", "min_impurity_decrease was determined based on a series of manual experiments with a varying", "config_base = { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(),", "10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\",", "pickle.load(f) # the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used", "# 
write scoring outputs into separate folder for each model i_serialization_dir = serialization_dir", "= config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) !=", "/ str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the pipeline if is_clustering_pipeline: scorer", "\"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use for imbalanced datasets (which we", "python.util.config import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def", "logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best config to file best_config_file = serialization_dir", "for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] =", "and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X,", "MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return metrics,", "# RFECV needs X to be an matrix-like of shape (n_samples, n_features). This", "instantiate the best model - # don't ask me why it needs to", "len(feature_names) return feature_names, support, grid_scores # When using oracle mention pair generation, a", "'maxclust']) cluster_depth = 0 if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10)", "for clustering # are optimized. 
The latter case needs a full classifier configuration,", "2 loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else: raise", "def train(config_data: Dict, config_model: Dict, config_training: Dict, config_global: Dict, logger: Logger) -> None:", "(classifier is None or type(classifier) is dict): raise ValueError(\"To optimize the mention pair", "= {p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines", "hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) # the format", "disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p,", "load and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"]", "X's are not matrix-like. So we run our pipeline up to the point", "When using oracle mention pair generation, a randomly determined subset of all mention", "# min required instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), #", "str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"]", "what you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if present,", "not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0,", "config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified for", "make more sense to me. 
:return: classifier config \"\"\" if classifier_name in [SVC_HUBER,", "that we can retrieve it later and use it to instantiate the best", "json.dumps(config)) # instantiate feature pipeline and classifier, transform the features pipeline, scoring =", "sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline from tabulate import tabulate", "pair classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name ==", "def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna to sample a config dictionary", "\"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth}", "# ------------- validate parameters --------------- if not with_clustering and (classifier is None or", "sample_classifier_config_with_optuna(trial, classifier) # store the config in the trial so that we can", "RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline from tabulate", "get_dict_hash def load_data(path): # load preprocessed dataset from file with open(path, \"rb\") as", "classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, #", "pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config =", "f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate", "\"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": 
\"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\":", "= trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers - i - 1)", "of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper", "== CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if present, inject hard document clusters into", "Union, Tuple import numpy as np import optuna import pandas as pd import", "serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines in", "use for imbalanced datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\":", "TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials,", "features # producing random and zero values. 
For 1e3 instances, values between 1e-7", "cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if not cluster_criterion ==", "later and use it to instantiate the best model - # don't ask", "in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)]", "{_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\":", "== MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes", "it needs to be stored as a string, using the dict object did", ":return: \"\"\" # During the hyperparameter optimization, use a fixed random seed for", "were tested, and 0.0015 # produced plots closest to the optimal expected result", "TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} }", "None or type(classifier) is dict): raise ValueError(\"To optimize the mention pair classifier, the", "== SVC_HUBER: # modified_huber results in a quadratically smoothed SVM with gamma =", "import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline,", "= [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler =", "them separately seems to make more sense to me. 
:return: classifier config \"\"\"", "to be stored as a string, using the dict object did not work", "ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna to sample", "1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use", "the features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\"", "al. system, so we split on underscores and pick the last value to", "6) -> Tuple[List[str], np.array, np.array]: # RFECV needs X to be an matrix-like", "node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step we", "shuffle=True) # We set min_impurity_decrease depending on the number of instances to obtain", "[SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber results in a quadratically smoothed", "\"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config", "python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def load_data(path): # load preprocessed", "grid_scores = selector.grid_scores_ assert len(support) == len(feature_names) return feature_names, support, grid_scores # When", "with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"]", "hyperparameter optimization of the mention pair classifier and agglomerative clustering. 
:param config_data: :param", "{\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\":", "config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation =", "= config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning =", "for a mention pair scoring scenario. Depending on those parameters, evaluation results are", "\"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2)", "/ \"pipeline\") # remove the classifier at the end of the pipeline classifier_wrapper", "# instantiate feature pipeline and classifier, transform the features pipeline, scoring = instantiate_pipeline(logger,", "matrix-like of shape (n_samples, n_features). 
This means we cannot use our pipeline as", "is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes =", "evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\"", "= instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv =", "instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the classifier at the", "data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = {", "\"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name", "significant peak around the number of non-garbage # features). Similar experiments were conducted", "# because our X's are not matrix-like. 
So we run our pipeline up", "callbacks = [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler", "as np import optuna import pandas as pd import seaborn as sns from", "the classifier to optimize.\") if with_clustering and (type(classifier) is str or not classifier):", "> 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\")", "study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds,", "at all! selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config = config_data[\"pairs\"] base_config", "= trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 **", "None: \"\"\" Trains n classifier+clustering pipelines with a given configuration. :param config_data: :param", "1e2), # Maximum delta step we allow each leaf output to be. 
Reported", "min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1 and", "pathlib import Path from typing import Dict, Optional, List, Union, Tuple import numpy", "Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\" To be used for hyperparameter", "False # collect mention pair scorer parameters if not \"pairs\" in config_data: raise", "config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline:", "i, p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict,", "SVC_HUBER: # modified_huber results in a quadratically smoothed SVM with gamma = 2", "= get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is", "pprint from logging import Logger from pathlib import Path from typing import Dict,", "dump, delayed, Parallel, load from optuna import Trial from optuna.samplers import TPESampler from", "True, hyperparameters for clustering # are optimized. The latter case needs a full", "is used. This has a # big influence on the results. 
We therefore", "as sns from joblib import dump, delayed, Parallel, load from optuna import Trial", "obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster", "get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_ assert len(support) == len(feature_names) return feature_names,", "feature pipeline and classifier, transform the features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering,", "df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot", "selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\":", "instantiation, so we need to deepcopy the originals to not lose them config", "[delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes =", "logger.info(\"Finding and loading model pipelines from disk.\") pipelines = {} # type: Dict[int,", "Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir: path to", "Dict, config_global: Dict, logger: Logger): \"\"\" Runs feature selection on the EVALUATION split.", "early stopping, so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning", "serialization_dir=serialization_dir / \"pipeline\") # remove the classifier at the end of the pipeline", "in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = 
optuna.create_study(sampler=sampler,", "hyperparameters for). Testing them separately seems to make more sense to me. :return:", "ask me why it needs to be stored as a string, using the", "with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str, Dict] # ------------- validate", "trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\",", "so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\":", "verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\"", "* {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\") def objective(trial: Trial): # config", "ValueError # alpha range follows the suggestions of the sklearn documentation classifier_config =", "Dict, logger: Logger): \"\"\" Runs feature selection on the EVALUATION split. Uses 10", "best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict, config_training:", "to deepcopy the originals to not lose them config = copy.deepcopy(base_config) if with_clustering:", "in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last pipeline step.\") # load", "PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix and", "oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring outputs into separate folder for each", "pair scoring scenario. 
Depending on those parameters, evaluation results are not representative. I", "random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted", "config_global: Dict, logger: Logger): \"\"\" Runs feature selection on the EVALUATION split. Uses", "# write to file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f:", "import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis", "pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf =", "pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines", "dealing with mention pair classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p", "those parameters, evaluation results are not representative. 
I hope you know what you're", "clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file)", "# None means \"use all features\", an empty list means no features at", "validate parameters --------------- if not with_clustering and (classifier is None or type(classifier) is", "and 0.0015 # produced plots closest to the optimal expected result (i.e. significant", "loading model pipelines from disk.\") pipelines = {} # type: Dict[int, Pipeline] for", "in cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\")", "\"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict, config_training: Dict, config_global: Dict, logger:", "# are optimized. The latter case needs a full classifier configuration, see below.", "leaf output to be. Reported to help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5,", "import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from", "3, 12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss", "one instance = one partition) # if there are few partitions cv_num_splits =", "= config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def", "with different random seeds for # the mention pair generation and aggregate those.", "plots closest to the optimal expected result (i.e. 
significant peak around the number", "not None: logger.warning(\"'mpg_prediction' was specified for a mention pair scoring scenario. Depending on", "Trial): # config dictionaries are modified during instantiation, so we need to deepcopy", "Dict] # ------------- validate parameters --------------- if not with_clustering and (classifier is None", "to obtain a useful feature selection result. # min_impurity_decrease was determined based on", "= [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\",", "# collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1)", "# type: Union[str, Dict] # ------------- validate parameters --------------- if not with_clustering and", "# obtain feature matrix and labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y =", "the originals to not lose them config = copy.deepcopy(base_config) if with_clustering: assert type(classifier)", "number of instances to obtain a useful feature selection result. 
# min_impurity_decrease was", "= [] for m in grid_scores: # number of features and CV-score for", "f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger: Logger):", "config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) #", "disk.\") pipelines = {} # type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i", "config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to disk\") model_dir =", "of features x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores))", ":param config_evaluate: :param config_global: :param logger: :return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR])", "config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write", "{ \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits:", "feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\" Runs feature selection on the EVALUATION", "import seaborn as sns from joblib import dump, delayed, Parallel, load from optuna", "classifier to find the most useful features. :param config_data: :param config_global: :param logger:", "= 0 # If False, hyperparameters for mention pair classification are optimized. 
If", "predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i,", "train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if", "pipelines from disk.\") pipelines = {} # type: Dict[int, Pipeline] for p in", "MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes =", "extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted features so that only", "LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber results in a quadratically smoothed SVM", "in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) #", "joblib import dump, delayed, Parallel, load from optuna import Trial from optuna.samplers import", "fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use", "study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config))", "result. 
# min_impurity_decrease was determined based on a series of manual experiments with", "metrics = pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"]", "config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) %", "oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults()", "most useful features. :param config_data: :param config_global: :param logger: :return: \"\"\" serialization_dir =", "True # if present, inject hard document clusters into the last pipeline stage", "in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\",", "load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning,", "the mention pair classifier, and feed that to RFECV. 
To do that, we", "config_data: :param config_model: :param config_hyperopt: :param config_global: :param logger: :return: \"\"\" # During", "for that number of features x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y)", "eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger,", "optimization.\") callbacks = [] if \"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\"))", "serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from disk.\") pipelines = {}", "classifier, transform the features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir", "labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed,", "= pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) #", "get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits at most and", "'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\",", "CPUs != 0)\") def objective(trial: Trial): # config dictionaries are modified during instantiation,", "pipeline in 
pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers", "means no features at all! selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config", "trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\":", "alpha range follows the suggestions of the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\",", "the suggestions of the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss,", ":param logger: :return: \"\"\" # During the hyperparameter optimization, use a fixed random", "to help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0),", "# aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"]", "True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\",", "step.\") # load and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation", "--------------- if not with_clustering and (classifier is None or type(classifier) is dict): raise", "= Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f)", "is str or not classifier): raise ValueError(\"To optimize the clustering step, the 'classifier'", "experiments were conducted for 1e4 and 1e5 instances. 
We interpolate between these data", "from file with open(path, \"rb\") as f: data = pickle.load(f) return data def", "and use it to instantiate the best model - # don't ask me", "int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger,", "classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we use early", "used. This has a # big influence on the results. We therefore make", "f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global: Dict, logger: Logger) ->", "= TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective,", "we input the feature matrix + # labels into the mention pair classifier,", "load_data(path): # load preprocessed dataset from file with open(path, \"rb\") as f: data", "not matrix-like. 
So we run our pipeline up to the point where we", "config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True,", "= CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X,", "we split on underscores and pick the last value to obtain the document", "\"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min", "selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we have", "that only those from preliminary feature selection are used. # None means \"use", "now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation =", "requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is", "doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] =", "classifier to optimize.\") if with_clustering and (type(classifier) is str or not classifier): raise", "not identify last pipeline step.\") # load and prepare data eval_data = load_data(config_data[\"eval_data_path\"])", "of the tree. 
\"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step we allow", "pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics,", "min_impurity_decrease depending on the number of instances to obtain a useful feature selection", "} def run_rfecv_iteration(random_seed: int, n_splits: int = 6) -> Tuple[List[str], np.array, np.array]: #", "config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y =", "number of non-garbage # features). Similar experiments were conducted for 1e4 and 1e5", "going with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning =", "so we split on underscores and pick the last value to obtain the", "# instantiate scorer which fits the pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir)", "config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if", "datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\",", "0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\":", "# plot feature selection results plot_destination = serialization_dir / \"rfecv_plot.png\" ax = 
sns.lineplot(x=\"num-features\",", "To do that, we need to chop up the pipeline. config = copy.deepcopy(config_base)", "subset of all mention pairs is used. This has a # big influence", "idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\") def", "0.1}} else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses", "models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs)", "= study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" +", "/ \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def", "/ \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect scores", "raise ValueError # alpha range follows the suggestions of the sklearn documentation classifier_config", "is dict): raise ValueError(\"To optimize the mention pair classifier, the 'classifier' config parameter", "retrieve it later and use it to instantiate the best model - #", "trial :param classifier_name: The classifier to use (and sample hyperparameters for). 
Testing them", "pipeline and classifier, transform the features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True,", "type (mention pair classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if", "[\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"],", "the 'classifier' config parameter must be the name of the classifier to optimize.\")", "eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring outputs", "import Path from typing import Dict, Optional, List, Union, Tuple import numpy as", "very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_", "separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if", "serialized models and scorers :param config_data: :param config_evaluate: :param config_global: :param logger: :return:", "config_hyperopt: :param config_global: :param logger: :return: \"\"\" # During the hyperparameter optimization, use", "\"pairs\": pairs_config } # ------------- get going with optimization now --------------- serialization_dir =", "lose them config = copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not str config[\"classifier\"]", "Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\":", "fall back to leave-one-out (here: one instance = one partition) 
# if there", "data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores =", "Parallel, load from optuna import Trial from optuna.samplers import TPESampler from sklearn.feature_selection import", "trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name", "logger: :return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines", "from disk.\") pipelines = {} # type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir():", "the point where we input the feature matrix + # labels into the", "not classifier): raise ValueError(\"To optimize the clustering step, the 'classifier' config parameter must", "analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True)", "so that only those from preliminary feature selection are used. 
# None means", "the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the", "oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation)", "with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X,", "predict_and_evaluate(i, pipeline): # write scoring outputs into separate folder for each model i_serialization_dir", ":param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation", "CV splits and classifier. optimization_random_seed = 0 # If False, hyperparameters for mention", "data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\" Uses optuna to sample", "we need to deepcopy the originals to not lose them config = copy.deepcopy(base_config)", "for i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) #", "config:\\n\" + pprint.pformat(best_config)) # write best config to file best_config_file = serialization_dir /", "= int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) # find out if", "Forest mention classifier to find the most useful features. 
:param config_data: :param config_global:", "optuna trial :return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth", "\"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature selection results plot_destination = serialization_dir /", "clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last", "[delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\")", "are dealing with mention pair classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for", "seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) # assert that all", "= [2 ** (num_hidden_layers - i - 1) * last_hidden_layer_size for i in", "as f: hard_document_clusters = pickle.load(f) # the format in the pickle file is", "best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) #", "imbalanced data. 
\"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5,", "import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr", "\"pairs\" in config_data: raise ValueError(\"Scoring mention pairs requires a 'pairs' config.\") config_pairs =", "= sample_classifier_config_with_optuna(trial, classifier) # store the config in the trial so that we", "{}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict,", "_KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation", "\"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y,", "_KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000,", "for cluster in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\") for p", "in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values #", 
"i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the pipeline", "1e-3, 1e0), # Minimum loss reduction required to make a further partition on", "we can retrieve it later and use it to instantiate the best model", "{\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\":", "len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits", "repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best config to file best_config_file =", "= config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base", "this to filter extracted features so that only those from preliminary feature selection", "hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) # the format in the", "headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg", "features x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns", "\"50%\"], level=1, inplace=True) # write metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir", "= MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return", "disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", 
index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\")", "which fits the pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer =", "classifier) # store the config in the trial so that we can retrieve", "output to be. Reported to help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0),", "up to the point where we input the feature matrix + # labels", "y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks", "5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name == MLP: num_hidden_layers =", "config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was", "mention pair generation and aggregate those. 
results = [] for seed in range(7):", "def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\" To", "trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if not cluster_criterion == 'inconsistent' else", "make sure run multiple RFECV iterations with different random seeds for # the", "np.array]: # RFECV needs X to be an matrix-like of shape (n_samples, n_features).", "is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1,", "metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics to", "last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\":", "need to deepcopy the originals to not lose them config = copy.deepcopy(base_config) if", "feature matrix + # labels into the mention pair classifier, and feed that", "back to leave-one-out (here: one instance = one partition) # if there are", "using oracle mention pair generation, a randomly determined subset of all mention pairs", "= {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4,", "Dict: \"\"\" Uses optuna to sample a config dictionary with clustering parameters. 
:param", "load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning,", "actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_ assert len(support)", "documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\":", "last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention pair scorer parameters if", "grid_scores: # number of features and CV-score for that number of features x_and_y", "callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds", "write metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str", "config_model: :param config_hyperopt: :param config_global: :param logger: :return: \"\"\" # During the hyperparameter", "random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir,", "on the EVALUATION split. 
Uses 10 runs of 5-fold cross-validation for recursive feature", "pipelines.values()} if len(last_pipeline_step_names) > 1: raise ValueError(\"All pipelines must be of the same", "LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance", "# During the hyperparameter optimization, use a fixed random seed for the Optuna", "raise ValueError(\"To optimize the mention pair classifier, the 'classifier' config parameter must be", "logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data", "= config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make", "values between 1e-7 and 1e-1 were tested, and 0.0015 # produced plots closest", "in the trial so that we can retrieve it later and use it", "with gamma = 2 loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss =", "p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate:", "# prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention", "config_data: :param config_model: :param config_training: :param config_global: :param logger: :return: \"\"\" serialization_dir =", "f: hard_document_clusters = pickle.load(f) # the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id", "to sample a config dictionary with clustering parameters. :param trial: optuna trial :return:", "random and zero values. 
For 1e3 instances, values between 1e-7 and 1e-1 were", "list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect", "{\"n_jobs\": 1, \"n_estimators\": 1000, # we use early stopping, so this is the", "extractors with default values. :return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR:", "pipelines to disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in", "== LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError # alpha range follows the", "point where we input the feature matrix + # labels into the mention", "1e-1 were tested, and 0.0015 # produced plots closest to the optimal expected", "model pipelines from disk.\") pipelines = {} # type: Dict[int, Pipeline] for p", "a full classifier configuration, see below. with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] #", "# don't ask me why it needs to be stored as a string,", "config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"]", "= list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False #", "hope you know what you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True", "into separate folder for each model i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) #", "Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir: path to the directory", "\"early_stopping\" in config_hyperopt: callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt[\"early_stopping\"])) 
callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study =", "\\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR,", "examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data,", "to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg,", "mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified for a", "return metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\" Runs feature selection", "X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE: \"RandomForest\",", "not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant,", "feature_names)) == 1 # collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\"))", "# remove the classifier at the end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1]", "I hope you know what you're 
doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline =", "because our X's are not matrix-like. So we run our pipeline up to", "Reported to help with imbalanced data. \"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5,", "for imbalanced datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\",", "config section for all feature extractors with default values. :return: \"\"\" return {", "0.5, 1.0), # recommended to use for imbalanced datasets (which we definitely have)", "model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path,", "\"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) # find out if we are dealing", "config_global: Dict, logger: Logger): \"\"\" To be used for hyperparameter optimization of the", "python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import", "open(path, \"rb\") as f: data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name:", "1, 10), # min required instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3,", "support, grid_scores # When using oracle mention pair generation, a randomly determined subset", "mention pair classification are optimized. 
If True, hyperparameters for clustering # are optimized.", "len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir", "optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"])", "the best model - # don't ask me why it needs to be", "in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger)", "python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import", "= config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics = pd.concat(metrics)", "DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write", "We therefore make sure run multiple RFECV iterations with different random seeds for", "cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs}", "the config in the trial so that we can retrieve it later and", "the optimal expected result (i.e. significant peak around the number of non-garbage #", "features. 
:param config_data: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path", "from optuna.samplers import TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score,", "1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers -", "to the directory containing serialized models and scorers :param config_data: :param config_evaluate: :param", "classifier): raise ValueError(\"To optimize the clustering step, the 'classifier' config parameter must be", "+ metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\" Runs", "big influence on the results. We therefore make sure run multiple RFECV iterations", "def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global: Dict, logger: Logger) -> pd.DataFrame:", "if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\",", "-> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config,", "import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring,", "present, inject hard document clusters into the last pipeline stage (the clustering stage)", "MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation 
import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME,", "{ \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None", "pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in", "cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs >", "to make more sense to me. :return: classifier config \"\"\" if classifier_name in", "config to more or less use in each optimization step ------------ extractors =", "outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by", "we are dealing with mention pair classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0]", "clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline =", "selector.grid_scores_ assert len(support) == len(feature_names) return feature_names, support, grid_scores # When using oracle", "num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2", "mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified for a mention pair scoring scenario.", "feature_names, support, grid_scores # When using oracle mention pair generation, a randomly determined", "outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction analysis for each coref", "import numpy as np import optuna import pandas as pd import seaborn as", "be stored as a string, using 
the dict object did not work trial.set_user_attr(\"config\",", "np import optuna import pandas as pd import seaborn as sns from joblib", "python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback", "use a fixed random seed for the Optuna sampling, CV splits and classifier.", "{cv_n_jobs} CPUs != 0)\") def objective(trial: Trial): # config dictionaries are modified during", "1) * last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\":", "to RFECV. To do that, we need to chop up the pipeline. config", "\\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis", "len(set(get_dict_hash(fn) for fn in feature_names)) == 1 # collect selections in DataFrame selections", "m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) #", "values. 
:return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {},", "our pipeline up to the point where we input the feature matrix +", "trial.suggest_int(\"max_depth\", 3, 12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum", "= config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified", "if with_clustering: assert type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial)", "pair classifier, and feed that to RFECV. To do that, we need to", "scorer which fits the pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer", "\"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we use early stopping, so this", "p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last pipeline step.\") # load and prepare", "and (classifier is None or type(classifier) is dict): raise ValueError(\"To optimize the mention", "random seed for the Optuna sampling, CV splits and classifier. 
optimization_random_seed = 0", "doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation)", "[\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature selection results plot_destination = serialization_dir", "delayed, Parallel, load from optuna import Trial from optuna.samplers import TPESampler from sklearn.feature_selection", "clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section for all feature extractors", "partition) # if there are few partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats =", "are not matrix-like. So we run our pipeline up to the point where", "fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\")", "with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) #", "\"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), #", "classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError # alpha range follows", "with the Barhom et al. 
system, so we split on underscores and pick", "imbalanced datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\":", "loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError", "# config dictionaries are modified during instantiation, so we need to deepcopy the", "= min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1", "in feature_names)) == 1 # collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0],", "index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\"", "config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\":", "\"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError", "# ------------- get going with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data =", "cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we have very imbalanced data verbose=1) selector.fit(actual_X,", "in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict,", "to sample a config with classifier hyperparameters. 
:param trial: Optuna trial :param classifier_name:", "python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES", "0 if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\":", "pipelines with a given configuration. :param config_data: :param config_model: :param config_training: :param config_global:", "the Optuna sampling, CV splits and classifier. optimization_random_seed = 0 # If False,", "n classifier+clustering pipelines with a given configuration. :param config_data: :param config_model: :param config_training:", "we allow each leaf output to be. Reported to help with imbalanced data.", "x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns =", "and agglomerative clustering. :param config_data: :param config_model: :param config_hyperopt: :param config_global: :param logger:", "(type(classifier) is str or not classifier): raise ValueError(\"To optimize the clustering step, the", "so we need to deepcopy the originals to not lose them config =", "len(support) == len(feature_names) return feature_names, support, grid_scores # When using oracle mention pair", "= pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature selection", "optuna import pandas as pd import seaborn as sns from joblib import dump,", "results are not representative. 
I hope you know what you're doing.\") elif last_pipeline_step_name", "True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial)", "a Random Forest mention classifier to find the most useful features. :param config_data:", "from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import", "and labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits,", "hard document clustering ({len(hard_document_clusters)} clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise", "it to instantiate the best model - # don't ask me why it", "If True, hyperparameters for clustering # are optimized. The latter case needs a", "metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics to disk metrics.to_csv(serialization_dir", "perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config", "'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict:", "cross-validation for recursive feature elimination with a Random Forest mention classifier to find", "logger: :return: \"\"\" # During the hyperparameter optimization, use a fixed random seed", "[2 ** (num_hidden_layers - i - 1) * last_hidden_layer_size for i in range(num_hidden_layers)]", "# if there are few partitions 
cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"]", "Pipeline] for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i]", "cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds *", "default values. :return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR:", "sample hyperparameters for). Testing them separately seems to make more sense to me.", "into the mention pair classifier, and feed that to RFECV. To do that,", "0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf,", "\"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS:", "our X's are not matrix-like. So we run our pipeline up to the", "return metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs =", "partition on a leaf node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), #", "WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger:", "config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"]", "{len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes", "tree. 
\"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step we allow each leaf", "\\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \\", ":param model_serialization_dir: path to the directory containing serialized models and scorers :param config_data:", "for doc_id in cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)}", "if not with_clustering and (classifier is None or type(classifier) is dict): raise ValueError(\"To", ":param logger: :return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model", "type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix", "+ 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir /", "classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50)", "random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\")", "clusters into the last pipeline stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if", "\"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": 
trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), #", "and evaluates :param model_serialization_dir: path to the directory containing serialized models and scorers", "influence on the results. We therefore make sure run multiple RFECV iterations with", "pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X,", "import pickle import pprint from logging import Logger from pathlib import Path from", "config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial:", "{}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model: Dict,", "{**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int)", "\\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from", "parameters. :param trial: optuna trial :return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent',", "a randomly determined subset of all mention pairs is used. 
This has a", "get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None:", "hyperparameter optimization, use a fixed random seed for the Optuna sampling, CV splits", "# we use early stopping, so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4,", "Dict, config_model: Dict, config_training: Dict, config_global: Dict, logger: Logger) -> None: \"\"\" Trains", "i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) # find out", "serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline,", "seems to make more sense to me. 
:return: classifier config \"\"\" if classifier_name", "iterations with different random seeds for # the mention pair generation and aggregate", "= Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to", "classifier_wrapper.classifier_ # obtain feature matrix and labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y", "y=train_y) return pipeline # train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs", "dictionaries are modified during instantiation, so we need to deepcopy the originals to", "trial: Optuna trial :param classifier_name: The classifier to use (and sample hyperparameters for).", "each coref link type and prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing", "with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) # the format in the pickle", "1e-7 and 1e-1 were tested, and 0.0015 # produced plots closest to the", "serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X,", "len(set(len(s) for s in supports)) == 1 assert len(set(get_dict_hash(fn) for fn in feature_names))", "determined based on a series of manual experiments with a varying number of", "with default values. 
:return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {},", "best config to file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data:", "config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats,", "if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: # modified_huber results in", "mention pair classifier, and feed that to RFECV. To do that, we need", "CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline", "min required instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max", "['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\",", "filter extracted features so that only those from preliminary feature selection are used.", "# learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance weight at", "and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) #", "you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True # if present, inject", "# producing random and zero values. 
For 1e3 instances, values between 1e-7 and", "pair classifier and agglomerative clustering. :param config_data: :param config_model: :param config_hyperopt: :param config_global:", "those. results = [] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores =", "trial: optuna trial :return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust'])", "peak around the number of non-garbage # features). Similar experiments were conducted for", "an matrix-like of shape (n_samples, n_features). This means we cannot use our pipeline", "/ f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs,", "fn in feature_names)) == 1 # collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(),", "1e4 and 1e5 instances. We interpolate between these data points. 
num_instances = len(actual_y)", "instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline", "oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline):", "sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1),", "= copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir", "are used. # None means \"use all features\", an empty list means no", "the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7,", "n_features). 
This means we cannot use our pipeline as is, # because our", ":param config_global: :param logger: :return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and", "cross_val_score, KFold from sklearn.pipeline import Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import", "config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates :param model_serialization_dir: path", "n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we have very imbalanced data verbose=1)", "for cross-validation, make 6 splits at most and fall back to leave-one-out (here:", "numpy as np import optuna import pandas as pd import seaborn as sns", "optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\" To be", "This means we cannot use our pipeline as is, # because our X's", "fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring =", "me. 
:return: classifier config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name ==", "scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines", "serialization_dir, logger) logger.info(\"Saving pipelines to disk\") model_dir = serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for", "\"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines in parallel logger.info(f\"Training", "pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg =", "for the Optuna sampling, CV splits and classifier. optimization_random_seed = 0 # If", "Dict, Optional, List, Union, Tuple import numpy as np import optuna import pandas", "loss = \"log\" else: raise ValueError # alpha range follows the suggestions of", "pprint.pformat(best_config)) # write best config to file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config,", "For 1e3 instances, values between 1e-7 and 1e-1 were tested, and 0.0015 #", "be a complete classifier configuration in the form of a dictionary.\") # -------------", "or less use in each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if", "evaluates :param model_serialization_dir: path to the directory containing serialized models and scorers :param", "# if present, inject hard document clusters into the last pipeline stage (the", "and zero values. 
For 1e3 instances, values between 1e-7 and 1e-1 were tested,", "m in grid_scores: # number of features and CV-score for that number of", "aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else:", "needs to be stored as a string, using the dict object did not", ":param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering", "maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), #", "results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) # assert that all results are compatible", "python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import", "results = [] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results))", "classifier_name: The classifier to use (and sample hyperparameters for). 
Testing them separately seems", "of manual experiments with a varying number of features # producing random and", "config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0:", "CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from python.util.optuna", "10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1,", "\"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials =", "= config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data,", "# When using oracle mention pair generation, a randomly determined subset of all", "from pathlib import Path from typing import Dict, Optional, List, Union, Tuple import", "base config to more or less use in each optimization step ------------ extractors", "find the most useful features. 
:param config_data: :param config_global: :param logger: :return: \"\"\"", "range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving", "modified during instantiation, so we need to deepcopy the originals to not lose", "= list(zip(*results)) # assert that all results are compatible assert len(set(len(s) for s", "if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted", "step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults()", "\"\"\" Uses optuna to sample a config with classifier hyperparameters. :param trial: Optuna", "interpolate between these data points. num_instances = len(actual_y) xp = np.log10([1e3, 1e5]) fp", "or not classifier): raise ValueError(\"To optimize the clustering step, the 'classifier' config parameter", "import pprint from logging import Logger from pathlib import Path from typing import", "\"rb\") as f: data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str)", "hard document clusters into the last pipeline stage (the clustering stage) hard_document_clusters_file =", "= len(actual_y) xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances),", "= 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv,", "logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines 
to disk\") model_dir = serialization_dir", "write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def load_data(path): #", "with a Random Forest mention classifier to find the most useful features. :param", "# min_impurity_decrease was determined based on a series of manual experiments with a", "aggregate those. results = [] for seed in range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores", "hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters)", "= copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial,", "trial :return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth =", "pipeline # train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed)", "config_data: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"]", "be an matrix-like of shape (n_samples, n_features). This means we cannot use our", "step we allow each leaf output to be. 
Reported to help with imbalanced", "mention pair scorer parameters if not \"pairs\" in config_data: raise ValueError(\"Scoring mention pairs", "feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because we", "cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section for all", "config with classifier hyperparameters. :param trial: Optuna trial :param classifier_name: The classifier to", "config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\": config_data[\"pairs\"]}", "= pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\" Uses", "results are compatible assert len(set(len(s) for s in supports)) == 1 assert len(set(get_dict_hash(fn)", "LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST,", "\"use all features\", an empty list means no features at all! selected_features =", "dict object did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and classifier,", "scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return metrics, outcomes # predict in parallel", "we cannot use our pipeline as is, # because our X's are not", "can retrieve it later and use it to instantiate the best model -", "Optuna trial :param classifier_name: The classifier to use (and sample hyperparameters for). 
Testing", "Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from disk.\") pipelines = {} # type:", "a varying number of features # producing random and zero values. For 1e3", "(serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data:", "on a series of manual experiments with a varying number of features #", "config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering =", "f1_weighted because we have very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names =", "metrics[\"model\"] = i return metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate", "imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores", "get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR from", "mention pair classifier and agglomerative clustering. :param config_data: :param config_model: :param config_hyperopt: :param", "a config dictionary with clustering parameters. 
:param trial: optuna trial :return: config dictionary", "input the feature matrix + # labels into the mention pair classifier, and", "run_rfecv_iteration(random_seed: int, n_splits: int = 6) -> Tuple[List[str], np.array, np.array]: # RFECV needs", "for s in supports)) == 1 assert len(set(get_dict_hash(fn) for fn in feature_names)) ==", "Trial, classifier_name: str) -> Dict: \"\"\" Uses optuna to sample a config with", "pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline #", "for m in grid_scores: # number of features and CV-score for that number", "return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\" Uses optuna to", "with classifier hyperparameters. :param trial: Optuna trial :param classifier_name: The classifier to use", "(and sample hyperparameters for). Testing them separately seems to make more sense to", "return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna to sample a", "object did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and classifier, transform", "config_model: Dict, config_training: Dict, config_global: Dict, logger: Logger) -> None: \"\"\" Trains n", "logger: Logger): \"\"\" To be used for hyperparameter optimization of the mention pair", "because we have very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline)", "i return metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs", "last value to obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in", "# the mention pair generation and aggregate those. 
results = [] for seed", "metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as", "pipeline stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None:", "= load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data,", "to file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model:", "from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline", "features\", an empty list means no features at all! selected_features = config_model[\"features\"].get(\"selected_features\", None)", "seaborn as sns from joblib import dump, delayed, Parallel, load from optuna import", "the mention pair classifier and agglomerative clustering. :param config_data: :param config_model: :param config_hyperopt:", "features and CV-score for that number of features x_and_y = np.vstack([np.arange(1, len(m) +", "* cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs", "\"\"\" Runs feature selection on the EVALUATION split. Uses 10 runs of 5-fold", "EVALUATION split. 
Uses 10 runs of 5-fold cross-validation for recursive feature elimination with", "id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster in hard_document_clusters] logger.info(f\"Using", "'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns", "sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"]", "\"log\" else: raise ValueError # alpha range follows the suggestions of the sklearn", "therefore make sure run multiple RFECV iterations with different random seeds for #", "expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\")", "mention classifier to find the most useful features. :param config_data: :param config_global: :param", "config_evaluate: Dict, config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates :param", "to be an matrix-like of shape (n_samples, n_features). This means we cannot use", "pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the", "xp = np.log10([1e3, 1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp)", "\"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str)", "preprocessed dataset from file with open(path, \"rb\") as f: data = pickle.load(f) return", "# produced plots closest to the optimal expected result (i.e. 
significant peak around", "== XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000, # we", "analysis for each coref link type and prediction examples if not is_clustering_pipeline and", "= Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from disk.\") pipelines = {} #", "python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR,", "case needs a full classifier configuration, see below. with_clustering = config_hyperopt[\"with_clustering\"] classifier =", "= selector.support_ grid_scores = selector.grid_scores_ assert len(support) == len(feature_names) return feature_names, support, grid_scores", "instances. We interpolate between these data points. num_instances = len(actual_y) xp = np.log10([1e3,", "1e5 instances. We interpolate between these data points. num_instances = len(actual_y) xp =", "optuna.samplers import TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold", "prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y", "run our pipeline up to the point where we input the feature matrix", "model i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the", "\"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif", "varying number of features # producing random and zero values. 
For 1e3 instances,", "if with_clustering and (type(classifier) is str or not classifier): raise ValueError(\"To optimize the", "a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3,", "0)\") def objective(trial: Trial): # config dictionaries are modified during instantiation, so we", "with_clustering and (classifier is None or type(classifier) is dict): raise ValueError(\"To optimize the", "for recursive feature elimination with a Random Forest mention classifier to find the", "Trial from optuna.samplers import TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold,", "as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global: Dict,", "= {**config_model, \"pairs\": config_data[\"pairs\"]} if base_pipeline_config[\"features\"][\"extractors\"] is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed:", "(the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file =", "to be. Reported to help with imbalanced data. 
\"subsample\": trial.suggest_float(\"subsample\", 0.5, 1.0), \"colsample_bytree\":", "is None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config)", "= [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature selection results plot_destination =", "np.array, np.array]: # RFECV needs X to be an matrix-like of shape (n_samples,", "grid_scores # When using oracle mention pair generation, a randomly determined subset of", "from sklearn.pipeline import Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper", "choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs !=", "None: logger.warning(\"'mpg_prediction' was specified for a mention pair scoring scenario. Depending on those", "the EVALUATION split. Uses 10 runs of 5-fold cross-validation for recursive feature elimination", "from sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import", "why it needs to be stored as a string, using the dict object", ":return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0", "find out if we are dealing with mention pair classification or clustering pipelines", "see below. 
with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str, Dict] #", "trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return", ":param config_data: :param config_evaluate: :param config_global: :param logger: :return: metrics Dataframe \"\"\" serialization_dir", "= {} # type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0])", "classifier+clustering pipelines with a given configuration. :param config_data: :param config_model: :param config_training: :param", "1e5]) fp = np.log10([0.0015, 0.00025]) min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp) random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease) logger.info(\"Running feature", "_TYPE, _KWARGS, XGBOOST, \\ _FIT_PARAMS, MLP from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from", "split. Uses 10 runs of 5-fold cross-validation for recursive feature elimination with a", "mention pairs requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if", "EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash def load_data(path): # load preprocessed dataset from", "5, \"validation_fraction\": 0.1}} else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict:", "if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes", "is not None: logger.warning(\"'mpg_prediction' was specified for a mention pair scoring scenario. 
Depending", "= get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\":", "sklearn.pipeline import Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from", "metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i,", "def feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\" Runs feature selection on the", "from python.util.config import write_config from python.util.optuna import EarlyStoppingCallback, PlotCallback from python.util.util import get_dict_hash", "must be of the same type (mention pair classification or clustering)\") last_pipeline_step_name =", "have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5,", "separately seems to make more sense to me. :return: classifier config \"\"\" if", "are optimized. The latter case needs a full classifier configuration, see below. 
with_clustering", "extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted features", "1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else:", "classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending on the", "else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] =", "logging import Logger from pathlib import Path from typing import Dict, Optional, List,", "for hyperparameter optimization of the mention pair classifier and agglomerative clustering. :param config_data:", "cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section", "# load and prepare data eval_data = load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation =", "assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the config in", "\"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError return classifier_config", "data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\"", "= pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config", "\"\"\" Trains n classifier+clustering pipelines with a given configuration. 
:param config_data: :param config_model:", "= [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes", "with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the classifier at the end of", "tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from", "instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree depth", "type and prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant", ":return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data =", "Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\" To be used", "def objective(trial: Trial): # config dictionaries are modified during instantiation, so we need", "inject hard document clusters into the last pipeline stage (the clustering stage) hard_document_clusters_file", "config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str, Dict] # ------------- validate parameters ---------------", "features: \" + \"\\n\".join(selected_features)) # collect scores df_grid_scores = [] for m in", "to find the most useful features. 
:param config_data: :param config_global: :param logger: :return:", "the last value to obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id", "5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers - i - 1) * last_hidden_layer_size", "same type (mention pair classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers", "config to file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict,", "containing serialized models and scorers :param config_data: :param config_evaluate: :param config_global: :param logger:", "= load(p) # find out if we are dealing with mention pair classification", "classifier, and feed that to RFECV. To do that, we need to chop", "config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\" To be used for hyperparameter optimization", "clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names) > 1:", "train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger,", ":param config_model: :param config_training: :param config_global: :param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR]", "and classifier, transform the features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True,", "= \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError #", "create base config to more or less use in each optimization step ------------", "not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and 
hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f:", "= list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction analysis for each coref link", "selector.support_ grid_scores = selector.grid_scores_ assert len(support) == len(feature_names) return feature_names, support, grid_scores #", "\" + \"\\n\".join(selected_features)) # collect scores df_grid_scores = [] for m in grid_scores:", "(num_hidden_layers - i - 1) * last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config =", "list means no features at all! selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List]", "config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6", "clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid',", "RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1", "pair scorer parameters if not \"pairs\" in config_data: raise ValueError(\"Scoring mention pairs requires", "format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom", "scorer = CrossDocCorefScoring(metrics=\"all\", serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline,", "pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We", 
"features so that only those from preliminary feature selection are used. # None", "cross-validation, make 6 splits at most and fall back to leave-one-out (here: one", "= config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this", "= copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"]", "pipelines = {} # type: Dict[int, Pipeline] for p in model_serialization_dir.iterdir(): i =", "/ f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines in parallel logger.info(f\"Training {num_models_to_train}", "randomly determined subset of all mention pairs is used. This has a #", "or type(classifier) is dict): raise ValueError(\"To optimize the mention pair classifier, the 'classifier'", "between 1e-7 and 1e-1 were tested, and 0.0015 # produced plots closest to", "} # ------------- get going with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data", "We interpolate between these data points. 
num_instances = len(actual_y) xp = np.log10([1e3, 1e5])", "of the same type (mention pair classification or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] #", "type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix and labels conflated_X", "= scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return metrics, outcomes # predict in", "and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect", "to leave-one-out (here: one instance = one partition) # if there are few", "use in each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is", "assert len(support) == len(feature_names) return feature_names, support, grid_scores # When using oracle mention", ":param config_model: :param config_hyperopt: :param config_global: :param logger: :return: \"\"\" # During the", "base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] =", "hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists()", "optimized. If True, hyperparameters for clustering # are optimized. The latter case needs", "+ pprint.pformat(best_config)) # write best config to file best_config_file = serialization_dir / \"best_model_config.yaml\"", "- i - 1) * last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config = {_TYPE:", "that, we need to chop up the pipeline. 
config = copy.deepcopy(config_base) config[\"random_seed\"] =", "from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME", "the Barhom et al. system, so we split on underscores and pick the", "y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set", "------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() #", "= load_data(eval_data_path) X, y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\":", "leaf node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step", "a quadratically smoothed SVM with gamma = 2 loss = \"modified_huber\" elif classifier_name", "range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\": \"adam\", \"learning_rate_init\":", "is_clustering_pipeline = True # if present, inject hard document clusters into the last", "import Pipeline from tabulate import tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input", "config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\":", "those from preliminary feature 
selection are used. # None means \"use all features\",", "are not representative. I hope you know what you're doing.\") elif last_pipeline_step_name ==", "TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline", "features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" /", "== 1 assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1 # collect selections", "a further partition on a leaf node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3,", "CV-score for that number of features x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose()", "= False # collect mention pair scorer parameters if not \"pairs\" in config_data:", "do that, we need to chop up the pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"]", "= config_model[\"classifier\"] # type: Union[str, Dict] # ------------- validate parameters --------------- if not", "pd import seaborn as sns from joblib import dump, delayed, Parallel, load from", "# load preprocessed dataset from file with open(path, \"rb\") as f: data =", "load preprocessed dataset from file with open(path, \"rb\") as f: data = pickle.load(f)", "1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config =", "X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\")", "more or less use in each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None)", "num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, 
num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics =", "parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines", "if config_training[\"analyze_feature_importance\"]: logger.info(\"Analyzing feature importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to disk\") model_dir", "None) if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to filter", "config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction'", "scoring=\"f1_weighted\", # use f1_weighted because we have very imbalanced data verbose=1) selector.fit(actual_X, actual_y)", "PlotCallback from python.util.util import get_dict_hash def load_data(path): # load preprocessed dataset from file", "{ LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {},", "don't ask me why it needs to be stored as a string, using", "high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted',", "into the last pipeline stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file", "p in model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p)", "raise ValueError(\"Could not identify last pipeline step.\") # load and prepare data eval_data", "folder for each model i_serialization_dir = serialization_dir / str(i) 
i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer", "[\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics", "detailed prediction analysis for each coref link type and prediction examples if not", "logger.info(\"Running feature selection...\") selector = RFECV(estimator=random_forest_clf, n_jobs=config_global[MAX_CORES], cv=cv, scoring=\"f1_weighted\", # use f1_weighted because", ":param config_hyperopt: :param config_global: :param logger: :return: \"\"\" # During the hyperparameter optimization,", "if we are dealing with mention pair classification or clustering pipelines last_pipeline_step_names =", "cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks = []", "typing import Dict, Optional, List, Union, Tuple import numpy as np import optuna", "1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\":", "as a string, using the dict object did not work trial.set_user_attr(\"config\", json.dumps(config)) #", "= serialization_dir / \"serialized_models\" model_dir.mkdir(exist_ok=True) for i, p in enumerate(pipelines): dump(p, model_dir /", "trial.suggest_float(\"colsample_bytree\", 0.5, 1.0), \"colsample_bylevel\": trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use for imbalanced", "trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}}", "representative. 
I hope you know what you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline", "document clusters into the last pipeline stage (the clustering stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"]", "mention pairs is used. This has a # big influence on the results.", "trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete',", "at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\",", "the number of instances to obtain a useful feature selection result. # min_impurity_decrease", "the form of a dictionary.\") # ------------- create base config to more or", "= instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return", "classifier_config def sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna to sample a config", "folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\") def objective(trial: Trial): #", "Uses 10 runs of 5-fold cross-validation for recursive feature elimination with a Random", "trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and classifier, transform the features pipeline, scoring", "/ \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir", "produced plots closest to the optimal expected result (i.e. 
significant peak around the", "None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int = 6) ->", "more sense to me. :return: classifier config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]:", "json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best", "\"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int = 6) -> Tuple[List[str], np.array,", "classifier to use (and sample hyperparameters for). Testing them separately seems to make", "\"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int = 6)", "mention pair classification or clustering pipelines last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()}", "pipelines last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()} if len(last_pipeline_step_names) > 1: raise", "selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_ assert", "config_model: Dict, config_hyperopt: Dict, config_global: Dict, logger: Logger): \"\"\" To be used for", "we need to chop up the pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed", "instances to obtain a useful feature selection result. 
# min_impurity_decrease was determined based", "oracle mention pair generation, a randomly determined subset of all mention pairs is", "with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def", "means we cannot use our pipeline as is, # because our X's are", "from optuna import Trial from optuna.samplers import TPESampler from sklearn.feature_selection import RFECV from", "on a leaf node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum", "cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits}", "runs of 5-fold cross-validation for recursive feature elimination with a Random Forest mention", "model_serialization_dir.iterdir(): i = int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) # find", "logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else:", "% cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds", "seeds for # the mention pair generation and aggregate those. results = []", "partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs", "pair generation and aggregate those. 
results = [] for seed in range(7): results.append(run_rfecv_iteration(seed))", "supports)) == 1 assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1 # collect", "\"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\",", "= f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks = [] if \"early_stopping\" in config_hyperopt:", "cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting", "df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature selection results plot_destination = serialization_dir / \"rfecv_plot.png\"", "0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion,", "During the hyperparameter optimization, use a fixed random seed for the Optuna sampling,", "RFECV iterations with different random seeds for # the mention pair generation and", "config parameter must be the name of the classifier to optimize.\") if with_clustering", "\"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect scores df_grid_scores", "parameters --------------- if not with_clustering and (classifier is None or type(classifier) is dict):", "{}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt: Dict, config_global: Dict,", "use our pipeline as is, # because our X's are not matrix-like. So", "trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction required to make a further partition", "classification are optimized. 
If True, hyperparameters for clustering # are optimized. The latter", "experiments with a varying number of features # producing random and zero values.", "step, the 'classifier' config parameter must be a complete classifier configuration in the", "= get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted features so that only those", "\"\"\" Returns config section for all feature extractors with default values. :return: \"\"\"", "matrix and labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv =", "last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False", "None) # type: Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": {", "Uses optuna to sample a config with classifier hyperparameters. :param trial: Optuna trial", "classifier config \"\"\" if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]: if classifier_name == SVC_HUBER: #", "1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance", "evaluation results are not representative. I hope you know what you're doing.\") elif", "\"\\n\".join(selected_features)) # collect scores df_grid_scores = [] for m in grid_scores: # number", "base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config", "closest to the optimal expected result (i.e. significant peak around the number of", "pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only: detailed", "up the pipeline. 
config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger,", "config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int = 6) -> Tuple[List[str], np.array, np.array]:", "Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from disk.\") pipelines", "as f: data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) ->", "logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()]", "eval_y) metrics[\"model\"] = i return metrics, outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)}", "LOGISTIC_REGRESSION: loss = \"log\" else: raise ValueError # alpha range follows the suggestions", "12), # max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction", "= { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\":", "link type and prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\")", "((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices, expect idling", ":param trial: Optuna trial :param classifier_name: The classifier to use (and sample hyperparameters", "range(7): results.append(run_rfecv_iteration(seed)) feature_names, supports, grid_scores = list(zip(*results)) # assert that all results are", "clustering ({len(hard_document_clusters)} clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not", "# 
number of features and CV-score for that number of features x_and_y =", "this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\",", "outputs into separate folder for each model i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True)", "metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\" Runs feature selection on", "config_data: raise ValueError(\"Scoring mention pairs requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config", "LEMMA_EXTR: {}, TFIDF_EXTR: {}, TIME_EXTR: {}, LOCATION_EXTR: {}, SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR:", "# If False, hyperparameters for mention pair classification are optimized. If True, hyperparameters", "optimize the mention pair classifier, the 'classifier' config parameter must be the name", "splits and classifier. optimization_random_seed = 0 # If False, hyperparameters for mention pair", "to chop up the pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline, scoring", "a complete classifier configuration in the form of a dictionary.\") # ------------- create", "scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention pair scorer", "optuna to sample a config dictionary with clustering parameters. 
:param trial: optuna trial", "are few partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES]", "did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and classifier, transform the", "int, n_splits: int = 6) -> Tuple[List[str], np.array, np.array]: # RFECV needs X", "i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which fits the pipeline if is_clustering_pipeline: scorer = CrossDocCorefScoring(metrics=\"all\",", "\"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int,", ":return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from", "def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring", "in each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is None:", "int = 6) -> Tuple[List[str], np.array, np.array]: # RFECV needs X to be", "zero values. For 1e3 instances, values between 1e-7 and 1e-1 were tested, and", "pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et al. system,", "config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate", "collect scores df_grid_scores = [] for m in grid_scores: # number of features", "number of features # producing random and zero values. 
For 1e3 instances, values", "= config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str, Dict] # ------------- validate parameters", "obtain a useful feature selection result. # min_impurity_decrease was determined based on a", "sample_clustering_config_with_optuna(trial: Trial) -> Dict: \"\"\" Uses optuna to sample a config dictionary with", "file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et al. system, so", "hidden_layer_sizes = [2 ** (num_hidden_layers - i - 1) * last_hidden_layer_size for i", "recommended to use for imbalanced datasets (which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0,", "\"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError return classifier_config def sample_clustering_config_with_optuna(trial:", "python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR,", "logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger: Logger): \"\"\"", "------------- create base config to more or less use in each optimization step", "Pass this to filter extracted features so that only those from preliminary feature", "is None or type(classifier) is dict): raise ValueError(\"To optimize the mention pair classifier,", "optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial", "doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: 
is_clustering_pipeline = True # if present, inject hard", "the trial so that we can retrieve it later and use it to", "config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\")", "the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), # learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10),", "= config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and", "= random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" /", "to be used with the Barhom et al. system, so we split on", "oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits at most and fall back to", "config_data: Dict, config_evaluate: Dict, config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts and", "-> Dict: \"\"\" Uses optuna to sample a config with classifier hyperparameters. :param", "pandas as pd import seaborn as sns from joblib import dump, delayed, Parallel,", "metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by", "import Trial from optuna.samplers import TPESampler from sklearn.feature_selection import RFECV from sklearn.model_selection import", "given configuration. :param config_data: :param config_model: :param config_training: :param config_global: :param logger: :return:", "cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits *", "classifier and agglomerative clustering. 
:param config_data: :param config_model: :param config_hyperopt: :param config_global: :param", "we run our pipeline up to the point where we input the feature", "= copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed pipeline, scoring = instantiate_pipeline(logger, pipeline_config, with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir", "mention pair scoring scenario. Depending on those parameters, evaluation results are not representative.", "in config_data: raise ValueError(\"Scoring mention pairs requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"]", "oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation,", "empty list means no features at all! selected_features = config_model[\"features\"].get(\"selected_features\", None) # type:", "be the name of the classifier to optimize.\") if with_clustering and (type(classifier) is", "seed for the Optuna sampling, CV splits and classifier. 
optimization_random_seed = 0 #", "import dump, delayed, Parallel, load from optuna import Trial from optuna.samplers import TPESampler", "{\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name == MLP: num_hidden_layers", "models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs) if config_training[\"analyze_feature_importance\"]:", "to more or less use in each optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\",", "{\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1), \"linkage_method\": trial.suggest_categorical(\"linkage_method\", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']),", "So we run our pipeline up to the point where we input the", "== 1 # collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features", "raise ValueError(\"Scoring mention pairs requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config =", "{\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def", "doc_id in cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters", "* last_hidden_layer_size for i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes),", "doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": {", "classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": 
loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000,", "classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_", "tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction required to make", "a string, using the dict object did not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate", "is_clustering_pipeline: group_by = [\"meta-doc\", \"metric\"] else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\",", "the feature matrix + # labels into the mention pair classifier, and feed", "= config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed,", "means \"use all features\", an empty list means no features at all! selected_features", "for). Testing them separately seems to make more sense to me. :return: classifier", "all feature extractors with default values. :return: \"\"\" return { LEMMA_EXTR: {}, TFIDF_EXTR:", "with_clustering=with_clustering, scorer_should_return_single_scalar=False, serialization_dir=serialization_dir / \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train", "ValueError(\"To optimize the clustering step, the 'classifier' config parameter must be a complete", "for 1e4 and 1e5 instances. We interpolate between these data points. 
num_instances =", "False}} elif classifier_name == MLP: num_hidden_layers = trial.suggest_int(\"num_hidden_layers\", 1, 2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\",", "python.util.util import get_dict_hash def load_data(path): # load preprocessed dataset from file with open(path,", "configuration in the form of a dictionary.\") # ------------- create base config to", "\"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) # write metrics to disk metrics.to_csv(serialization_dir /", "trial: \" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best config to", "selection results plot_destination = serialization_dir / \"rfecv_plot.png\" ax = sns.lineplot(x=\"num-features\", y=\"weighted-f1\", data=df_grid_scores) fig", "This has a # big influence on the results. We therefore make sure", "= config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best", "pipeline): # write scoring outputs into separate folder for each model i_serialization_dir =", "in a quadratically smoothed SVM with gamma = 2 loss = \"modified_huber\" elif", "1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name ==", "= get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"] = random_seed", "# Pass this to filter extracted features so that only those from preliminary", "shape (n_samples, n_features). 
This means we cannot use our pipeline as is, #", "cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending on the number", "assert that all results are compatible assert len(set(len(s) for s in supports)) ==", "python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from", "logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_ assert len(support) ==", "'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() ->", "(here: one instance = one partition) # if there are few partitions cv_num_splits", "The classifier to use (and sample hyperparameters for). Testing them separately seems to", "None means \"use all features\", an empty list means no features at all!", "# train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for", "from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR,", "plot_destination = serialization_dir / \"rfecv_plot.png\" ax = sns.lineplot(x=\"num-features\", y=\"weighted-f1\", data=df_grid_scores) fig = ax.get_figure()", "features at all! 
selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config = config_data[\"pairs\"]", "sample a config with classifier hyperparameters. :param trial: Optuna trial :param classifier_name: The", "# predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline) for", "callbacks.append(PlotCallback(serialization_dir=serialization_dir / \"plots\")) sampler = TPESampler(seed=optimization_random_seed) study = optuna.create_study(sampler=sampler, direction=\"maximize\") optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds()", "f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str) return metrics_agg def feature_selection(config_data: Dict, config_global: Dict, logger:", "-> Dict: \"\"\" Uses optuna to sample a config dictionary with clustering parameters.", "optimization step ------------ extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors =", "= get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) def predict_and_evaluate(i, pipeline): # write scoring outputs into", "with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed)", "to instantiate the best model - # don't ask me why it needs", "last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers - i -", "feature_names, supports, grid_scores = list(zip(*results)) # assert that all results are compatible assert", "depending on the number of instances to obtain a useful feature selection result.", "scorer = 
MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i", "sklearn.feature_selection import RFECV from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold from sklearn.pipeline import Pipeline", "with_clustering and (type(classifier) is str or not classifier): raise ValueError(\"To optimize the clustering", "config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] data = load_data(eval_data_path) X, y =", "trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta step we allow each leaf output to", "config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y =", "for i, p in enumerate(pipelines): dump(p, model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data:", "smoothed SVM with gamma = 2 loss = \"modified_huber\" elif classifier_name == LOGISTIC_REGRESSION:", "# find out if we are dealing with mention pair classification or clustering", "= get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits at most", "scoring scenario. Depending on those parameters, evaluation results are not representative. 
I hope", "from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from python.util.optuna import EarlyStoppingCallback,", "= trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if not cluster_criterion == 'inconsistent'", "and CV-score for that number of features x_and_y = np.vstack([np.arange(1, len(m) + 1),", "# write best config to file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file)", "was specified for a mention pair scoring scenario. Depending on those parameters, evaluation", "Tuple[List[str], np.array, np.array]: # RFECV needs X to be an matrix-like of shape", "and pick the last value to obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1]", "model_dir / f\"{i}.pipeline.joblib\") def evaluate(model_serialization_dir: Path, config_data: Dict, config_evaluate: Dict, config_global: Dict, logger:", "logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data", "classifier. optimization_random_seed = 0 # If False, hyperparameters for mention pair classification are", "= serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict, config_training: Dict,", "Dict, config_global: Dict, logger: Logger): \"\"\" To be used for hyperparameter optimization of", "{\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config } #", "\"\"\" Uses optuna to sample a config dictionary with clustering parameters. 
:param trial:", "None: base_pipeline_config[\"features\"][\"extractors\"] = get_feature_extractors_config_with_all_and_defaults() def fit_save_and_report(random_seed: int) -> Pipeline: pipeline_config = copy.deepcopy(base_pipeline_config) pipeline_config[\"random_seed\"]", "write best config to file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def", "metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" +", "list(zip(*results)) # assert that all results are compatible assert len(set(len(s) for s in", ":param config_data: :param config_model: :param config_training: :param config_global: :param logger: :return: \"\"\" serialization_dir", "transform the features pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=with_clustering, use_caching=True, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir /", "2) last_hidden_layer_size = trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers - i", "one partition) # if there are few partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats", "6 splits at most and fall back to leave-one-out (here: one instance =", "int(p.stem.split(\".\")[0]) if \"\".join(p.suffixes) == \".pipeline.joblib\": pipelines[i] = load(p) # find out if we", "type: Optional[List] pairs_config = config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors,", "({len(hard_document_clusters)} clusters given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify", "the number of non-garbage # features). 
Similar experiments were conducted for 1e4 and", "to not lose them config = copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not", "else: group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True)", "= config_hyperopt[\"cv_num_repeats\"] cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats)", "We set min_impurity_decrease depending on the number of instances to obtain a useful", "copy.deepcopy(base_config) if with_clustering: assert type(classifier) is not str config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] =", "'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults()", "config parameter must be a complete classifier configuration in the form of a", "have very imbalanced data verbose=1) selector.fit(actual_X, actual_y) logger.info(\"Done.\") feature_names = get_feature_names_from_pipeline(pipeline) support =", "True, \"early_stopping\": True, \"n_iter_no_change\": 5, \"validation_fraction\": 0.1}} else: raise ValueError return classifier_config def", "config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the config in the trial so that", "to filter extracted features so that only those from preliminary feature selection are", "topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et al. system, so we split", "\"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\":", "of instances to obtain a useful feature selection result. # min_impurity_decrease was determined", "clustering. 
:param config_data: :param config_model: :param config_hyperopt: :param config_global: :param logger: :return: \"\"\"", "in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\") for p in pipelines.values():", "1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter choices,", "and loading model pipelines from disk.\") pipelines = {} # type: Dict[int, Pipeline]", "# the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with", "extractors = config_model[\"features\"].get(\"extractors\", None) if extractors is None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass", "train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits", "= pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ #", "else: raise ValueError # alpha range follows the suggestions of the sklearn documentation", "import Dict, Optional, List, Union, Tuple import numpy as np import optuna import", "if there are few partitions cv_num_splits = min(6, len(train_X)) cv_num_repeats = config_hyperopt[\"cv_num_repeats\"] cv_n_jobs", "optuna_timeout_seconds = pd.to_timedelta(config_hyperopt[\"timeout\"]).total_seconds() optuna_n_trials = config_hyperopt[\"n_trials\"] study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial", "of shape (n_samples, n_features). 
This means we cannot use our pipeline as is,", "!= 0)\") def objective(trial: Trial): # config dictionaries are modified during instantiation, so", "feature_names = get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_ assert len(support) == len(feature_names)", ":param config_data: :param config_model: :param config_hyperopt: :param config_global: :param logger: :return: \"\"\" #", "the mention pair classifier, the 'classifier' config parameter must be the name of", "is PredictOnTransformClassifierWrapper random_forest_clf = classifier_wrapper.classifier_ # obtain feature matrix and labels conflated_X =", "/ \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f:", "\"n_estimators\": 1000, # we use early stopping, so this is the maximum \"learning_rate\":", "------------- get going with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"])", "hyperparameters for mention pair classification are optimized. 
If True, hyperparameters for clustering #", "scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv =", "coref link type and prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]: logger.info(f\"Performing prediction", "config_data: :param config_evaluate: :param config_global: :param logger: :return: metrics Dataframe \"\"\" serialization_dir =", "= serialization_dir / \"rfecv_plot.png\" ax = sns.lineplot(x=\"num-features\", y=\"weighted-f1\", data=df_grid_scores) fig = ax.get_figure() fig.savefig(str(plot_destination))", "stage) hard_document_clusters_file = config_evaluate[\"hard_document_clusters_file\"] if hard_document_clusters_file is not None: hard_document_clusters_file = Path(hard_document_clusters_file) assert", "trial.suggest_int(\"last_hidden_layer_size\", 5, 50) hidden_layer_sizes = [2 ** (num_hidden_layers - i - 1) *", "= get_feature_names_from_pipeline(pipeline) support = selector.support_ grid_scores = selector.grid_scores_ assert len(support) == len(feature_names) return", "scoring outputs into separate folder for each model i_serialization_dir = serialization_dir / str(i)", "1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST:", "logger.warning(\"'mpg_prediction' was specified for a mention pair scoring scenario. 
Depending on those parameters,", "= config_data[\"pairs\"] base_config = {\"random_seed\": optimization_random_seed, \"features\": { \"extractors\": extractors, \"selected_features\": selected_features },", "elif classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\": 1000,", "importance\") analyze_feature_importance(pipelines, serialization_dir, logger) logger.info(\"Saving pipelines to disk\") model_dir = serialization_dir / \"serialized_models\"", "scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the classifier at the end of the", "List, Union, Tuple import numpy as np import optuna import pandas as pd", "def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict: \"\"\" Uses optuna to sample a", ":return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] num_models_to_train = config_training[\"num_models_to_train\"] with_clustering = config_training[\"with_clustering\"] train_data =", "tabulate from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import", "None: extractors = get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted features so that", "file with open(path, \"rb\") as f: data = pickle.load(f) return data def sample_classifier_config_with_optuna(trial:", "\"solver\": \"adam\", \"learning_rate_init\": trial.suggest_loguniform(\"learning_rate_init\", 1e-4, 1e-1), \"max_iter\": 1000, \"shuffle\": True, \"early_stopping\": True, \"n_iter_no_change\":", "y = get_X_and_y_for_pipeline(logger, data, doc_partitioning=None, oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": 
{_TYPE: \"RandomForest\", _KWARGS:", "is, # because our X's are not matrix-like. So we run our pipeline", "callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" + repr(best_trial)) logger.info(\"Best", "= RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0)", "'classifier' config parameter must be the name of the classifier to optimize.\") if", "weight at a child \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12), # max tree depth \"gamma\":", "from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\ SENTENCE_EMBEDDING_EXTR,", "n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1 logger.info(\"Starting optimization.\") callbacks =", "i in range(num_hidden_layers)] classifier_config = {_TYPE: \"MLPClassifier\", _KWARGS: {\"hidden_layer_sizes\": tuple(hidden_layer_sizes), \"activation\": \"relu\", \"solver\":", "If False, hyperparameters for mention pair classification are optimized. 
If True, hyperparameters for", "given).\") for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last pipeline", "a series of manual experiments with a varying number of features # producing", "5}} elif classifier_name == XGBOOST: classifier_config = {_TYPE: \"ConvenientXGBClassifier\", _KWARGS: {\"n_jobs\": 1, \"n_estimators\":", "selected_features }, \"pairs\": pairs_config } # ------------- get going with optimization now ---------------", "write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict, config_training: Dict, config_global: Dict, logger: Logger)", "group_by = [\"metric\"] metrics_agg = metrics.groupby(group_by)[[\"f1\", \"precision\", \"recall\"]].describe(percentiles=[]) metrics_agg.drop(columns=[\"count\", \"50%\"], level=1, inplace=True) #", "raise ValueError(\"All pipelines must be of the same type (mention pair classification or", "None: hard_document_clusters_file = Path(hard_document_clusters_file) assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters", "file(s) selections.to_csv(str(serialization_dir / \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features:", "not with_clustering and (classifier is None or type(classifier) is dict): raise ValueError(\"To optimize", "labels into the mention pair classifier, and feed that to RFECV. To do", "outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"] = i return metrics, outcomes # predict", "originals to not lose them config = copy.deepcopy(base_config) if with_clustering: assert type(classifier) is", "et al. 
system, so we split on underscores and pick the last value", "index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str = tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\")", "and classifier. optimization_random_seed = 0 # If False, hyperparameters for mention pair classification", "metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir / \"metrics_aggregated.csv\", index=True) metrics_agg_str =", "collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) >", "-> Dict: \"\"\" Returns config section for all feature extractors with default values.", "study.optimize(objective, n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \"", "are compatible assert len(set(len(s) for s in supports)) == 1 assert len(set(get_dict_hash(fn) for", "used with the Barhom et al. 
system, so we split on underscores and", "= random_seed pipeline, scoring = instantiate_pipeline(logger, config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") #", "serialization_dir = config_global[RUN_WORKING_DIR] train_data = load_data(config_data[\"train_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X,", "index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir /", "pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values # write to file(s) selections.to_csv(str(serialization_dir", "for each coref link type and prediction examples if not is_clustering_pipeline and config_evaluate[\"perform_prediction_analysis\"]:", "pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"] df_grid_scores.to_csv(str(serialization_dir / \"grid_scores.csv\")) # plot feature selection results", "import Logger from pathlib import Path from typing import Dict, Optional, List, Union,", "cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config = {\"threshold\": trial.suggest_uniform(\"threshold\", 0, 1),", "/ \"pipeline\" / f\"seed_{random_seed:03}\") pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines in parallel", "loss, \"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), \"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}}", "the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster} for cluster in", "it later and use it to instantiate the best model - 
# don't", "level=1, inplace=True) # write metrics to disk metrics.to_csv(serialization_dir / \"metrics_unaggregated.csv\", index=True) metrics_agg.to_csv(serialization_dir /", "we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS:", "Dict, config_evaluate: Dict, config_global: Dict, logger: Logger) -> pd.DataFrame: \"\"\" Predicts and evaluates", "that number of features x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores", "need to chop up the pipeline. config = copy.deepcopy(config_base) config[\"random_seed\"] = random_seed pipeline,", "= pickle.load(f) # the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be", "Barhom et al. system, so we split on underscores and pick the last", "outcomes # predict in parallel logger.info(f\"Predicting/evaluating {len(pipelines)} separate models...\") jobs = [delayed(predict_and_evaluate)(i, pipeline)", "all! 
selected_features = config_model[\"features\"].get(\"selected_features\", None) # type: Optional[List] pairs_config = config_data[\"pairs\"] base_config =", "\"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", }, _FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\":", "max tree depth \"gamma\": trial.suggest_loguniform(\"gamma\", 1e-3, 1e0), # Minimum loss reduction required to", "all results are compatible assert len(set(len(s) for s in supports)) == 1 assert", "= 0 if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config =", "= config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config = {**config_model, \"pairs\":", "The latter case needs a full classifier configuration, see below. with_clustering = config_hyperopt[\"with_clustering\"]", "or clustering)\") last_pipeline_step_name = list(last_pipeline_step_names)[0] # prepare scorers if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline", "doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation)", "and 1e5 instances. We interpolate between these data points. 
num_instances = len(actual_y) xp", "get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed: int, n_splits: int =", "trial.suggest_float(\"colsample_bylevel\", 0.5, 1.0), # recommended to use for imbalanced datasets (which we definitely", "pairs_config } # ------------- get going with optimization now --------------- serialization_dir = config_global[RUN_WORKING_DIR]", "pick the last value to obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for", "complete classifier configuration in the form of a dictionary.\") # ------------- create base", "metrics, outcomes = list(zip(*metrics_and_outcomes)) # for classifiers only: detailed prediction analysis for each", "not work trial.set_user_attr(\"config\", json.dumps(config)) # instantiate feature pipeline and classifier, transform the features", "\"pipeline\") # remove the classifier at the end of the pipeline classifier_wrapper =", "trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance weight at a child \"max_depth\": trial.suggest_int(\"max_depth\",", "Path from typing import Dict, Optional, List, Union, Tuple import numpy as np", "= tabulate(metrics_agg, headers=\"keys\") with (serialization_dir / \"metrics_aggregated_pretty.txt\").open(\"w\") as f: f.write(metrics_agg_str) logger.info(\"\\n\" + metrics_agg_str)", "1 # collect selections in DataFrame selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name=\"feature-name\")) selected_features =", "with a varying number of features # producing random and zero values. For", "of a dictionary.\") # ------------- create base config to more or less use", "clustering parameters. 
:param trial: optuna trial :return: config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\",", "at most and fall back to leave-one-out (here: one instance = one partition)", "best model - # don't ask me why it needs to be stored", "+ repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best config to file best_config_file", "s in supports)) == 1 assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1", "doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) # for cross-validation, make 6 splits at most and fall back", "['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']), \"cluster_criterion\": cluster_criterion, \"cluster_depth\": cluster_depth} return clustering_config", "config_global: Dict, logger: Logger) -> None: \"\"\" Trains n classifier+clustering pipelines with a", "if classifier_name == SVC_HUBER: # modified_huber results in a quadratically smoothed SVM with", "random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending on the number of instances to", "actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease", "\" + repr(best_trial)) logger.info(\"Best config:\\n\" + pprint.pformat(best_config)) # write best config to file", "mention pair classifier, the 'classifier' config parameter must be the name of the", "CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)\") def objective(trial:", "feed that to RFECV. To do that, we need to chop up the", "feature matrix and labels conflated_X = pipeline.fit_transform(X, y) actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X) cv", "classifier, the 'classifier' config parameter must be the name of the classifier to", "the mention pair generation and aggregate those. 
results = [] for seed in", "if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention pair scorer parameters", "'classifier' config parameter must be a complete classifier configuration in the form of", "import optuna import pandas as pd import seaborn as sns from joblib import", "get_feature_names_from_pipeline, \\ analyze_feature_importance from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \\ CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import", "train(config_data: Dict, config_model: Dict, config_training: Dict, config_global: Dict, logger: Logger) -> None: \"\"\"", "reduction required to make a further partition on a leaf node of the", "# store the config in the trial so that we can retrieve it", "of 5-fold cross-validation for recursive feature elimination with a Random Forest mention classifier", "get_feature_extractors_config_with_all_and_defaults() # Pass this to filter extracted features so that only those from", "Logger): \"\"\" Runs feature selection on the EVALUATION split. 
Uses 10 runs of", "for each model i_serialization_dir = serialization_dir / str(i) i_serialization_dir.mkdir(exist_ok=True) # instantiate scorer which", "\"features\": { \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config } # ------------- get", "from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \\ PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR,", "results in a quadratically smoothed SVM with gamma = 2 loss = \"modified_huber\"", "clustering step, the 'classifier' config parameter must be a complete classifier configuration in", "= cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return mean_f1", "deepcopy the originals to not lose them config = copy.deepcopy(base_config) if with_clustering: assert", "ValueError(\"Scoring mention pairs requires a 'pairs' config.\") config_pairs = config_data[\"pairs\"] mpg_prediction_config = config_pairs.pop(\"mpg_prediction\")", "needs a full classifier configuration, see below. with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"]", "only: detailed prediction analysis for each coref link type and prediction examples if", "config, with_clustering=False, scorer_should_return_single_scalar=True, serialization_dir=serialization_dir / \"pipeline\") # remove the classifier at the end", "classifier configuration, see below. 
with_clustering = config_hyperopt[\"with_clustering\"] classifier = config_model[\"classifier\"] # type: Union[str,", "(which we definitely have) \"scale_pos_weight\": trial.suggest_loguniform(\"scale_pos_weight\", 1.0, 10), \"objective\": \"binary:logistic\", \"eval_metric\": \"logloss\", },", "selection result. # min_impurity_decrease was determined based on a series of manual experiments", "be used with the Barhom et al. system, so we split on underscores", "= selector.grid_scores_ assert len(support) == len(feature_names) return feature_names, support, grid_scores # When using", "\"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"]", "\"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance weight at a child \"max_depth\":", "[] for m in grid_scores: # number of features and CV-score for that", "SENTENCE_EMBEDDING_EXTR: {}, ACTION_PHRASE_EMBEDDING_EXTR: {}, WIKIDATA_EMBEDDING_EXTR: {} } def optimize_hyperparameters(config_data: Dict, config_model: Dict, config_hyperopt:", "feature elimination with a Random Forest mention classifier to find the most useful", "latter case needs a full classifier configuration, see below. 
with_clustering = config_hyperopt[\"with_clustering\"] classifier", "np.vstack([np.arange(1, len(m) + 1), m]).transpose() df_grid_scores.append(x_and_y) df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores)) df_grid_scores.columns = [\"num-features\", \"weighted-f1\"]", "cluster} for cluster in hard_document_clusters] logger.info(f\"Using hard document clustering ({len(hard_document_clusters)} clusters given).\") for", "you know what you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME: is_clustering_pipeline = True #", "sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"] = sample_classifier_config_with_optuna(trial, classifier) # store the", "import CrossDocCorefScoring, MentionPairScoring from python.pipeline import RUN_WORKING_DIR, MAX_CORES from python.util.config import write_config from", "agglomerative clustering. :param config_data: :param config_model: :param config_hyperopt: :param config_global: :param logger: :return:", "file best_config_file = serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict,", "optimization_random_seed = 0 # If False, hyperparameters for mention pair classification are optimized.", "a leaf node of the tree. \"max_delta_step\": trial.suggest_loguniform(\"max_delta_step\", 1e-3, 1e2), # Maximum delta", "PredictOnTransformClassifierWrapper from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \\", "pipeline.fit(X=train_X, y=train_y) return pipeline # train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\")", "generation, a randomly determined subset of all mention pairs is used. 
This has", "and hard_document_clusters_file.is_file() with hard_document_clusters_file.open(\"rb\") as f: hard_document_clusters = pickle.load(f) # the format in", "Dict: \"\"\" Uses optuna to sample a config with classifier hyperparameters. :param trial:", "1e0), # Minimum loss reduction required to make a further partition on a", "selection are used. # None means \"use all features\", an empty list means", "jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()] metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs) metrics,", "and 1e-1 were tested, and 0.0015 # produced plots closest to the optimal", "train pipelines in parallel logger.info(f\"Training {num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed", "\"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif classifier_name == XGBOOST: classifier_config", "use early stopping, so this is the maximum \"learning_rate\": trial.suggest_loguniform(\"learning_rate\", 1e-4, 1e0), #", "optuna to sample a config with classifier hyperparameters. :param trial: Optuna trial :param", "features). Similar experiments were conducted for 1e4 and 1e5 instances. 
We interpolate between", "the end of the pipeline classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper assert type(classifier_wrapper)", "parameters if not \"pairs\" in config_data: raise ValueError(\"Scoring mention pairs requires a 'pairs'", "config dictionary \"\"\" cluster_criterion = trial.suggest_categorical(\"cluster_criterion\", ['inconsistent', 'distance', 'maxclust']) cluster_depth = 0 if", "{ \"extractors\": extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config } # ------------- get going", "copy import json import pickle import pprint from logging import Logger from pathlib", "/ \"selected_features_unaggregated.csv\")) with (serialization_dir / \"selected_features.txt\").open(\"w\") as f: f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" +", "quadratically smoothed SVM with gamma = 2 loss = \"modified_huber\" elif classifier_name ==", "100}}, \"features\": { \"extractors\": get_feature_extractors_config_with_all_and_defaults(), \"selected_features\": None }, \"pairs\": config_data[\"pairs\"] } def run_rfecv_iteration(random_seed:", "different random seeds for # the mention pair generation and aggregate those. results", "KFold(n_splits=n_splits, random_state=random_seed, shuffle=True) # We set min_impurity_decrease depending on the number of instances", "extractors, \"selected_features\": selected_features }, \"pairs\": pairs_config } # ------------- get going with optimization", "required to make a further partition on a leaf node of the tree.", "Uses optuna to sample a config dictionary with clustering parameters. 
:param trial: optuna", "of features and CV-score for that number of features x_and_y = np.vstack([np.arange(1, len(m)", "cluster_depth = 0 if not cluster_criterion == 'inconsistent' else trial.suggest_int(\"cluster_depth\", low=1, high=10) clustering_config", "modified_huber results in a quadratically smoothed SVM with gamma = 2 loss =", "dataset from file with open(path, \"rb\") as f: data = pickle.load(f) return data", "not representative. I hope you know what you're doing.\") elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME:", "a mention pair scoring scenario. Depending on those parameters, evaluation results are not", "False, hyperparameters for mention pair classification are optimized. If True, hyperparameters for clustering", "serialization_dir / \"best_model_config.yaml\" write_config(best_config, best_config_file) def train(config_data: Dict, config_model: Dict, config_training: Dict, config_global:", "a fixed random seed for the Optuna sampling, CV splits and classifier. optimization_random_seed", "to obtain the document id hard_document_clusters = [{doc_id.split(\"_\")[-1] for doc_id in cluster} for", "all mention pairs is used. 
This has a # big influence on the", "analysis\") num_samples_per_quadrant = config_evaluate[\"num_samples_per_quadrant\"] perform_prediction_analysis(dataset=eval_data, outcomes=outcomes, num_samples_per_quadrant=num_samples_per_quadrant, serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics", "get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section for all feature extractors with default", "n_trials=optuna_n_trials, timeout=optuna_timeout_seconds, callbacks=callbacks) best_trial = study.best_trial best_config = json.loads(best_trial.user_attrs[\"config\"]) logger.info(\"Best trial: \" +", "serialization_dir=serialization_dir) # aggregate metrics: min/max/mean/std metrics = pd.concat(metrics) if is_clustering_pipeline: group_by = [\"meta-doc\",", "f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X, y=train_y, n_jobs=cv_n_jobs, cv=cv, scoring=scoring, verbose=0) mean_f1 = f1_scores_cv.mean() return", "= load_data(config_data[\"eval_data_path\"]) doc_partitioning = config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] eval_X, eval_y = get_X_and_y_for_pipeline(logger, eval_data,", "serialization_dir=i_serialization_dir) else: scorer = MentionPairScoring(mpg_prediction_config, serialization_dir=i_serialization_dir) metrics, outcomes = scorer(pipeline, eval_X, eval_y) metrics[\"model\"]", "cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation", "sns from joblib import dump, delayed, Parallel, load from optuna import Trial from", "oracle_mention_pair_generation=oracle_mention_pair_generation) config_base = { \"classifier\": {_TYPE: \"RandomForest\", _KWARGS: {\"n_estimators\": 100}}, \"features\": { \"extractors\":", "suggestions of the sklearn documentation classifier_config = {_TYPE: \"SGDClassifier\", _KWARGS: {\"loss\": loss, \"alpha\":", "> 1 and ((cv_num_splits * 
cv_num_repeats) % cv_n_jobs) != 0: logger.warning(f\"Inefficient cross-validation parameter", "config_evaluate: :param config_global: :param logger: :return: metrics Dataframe \"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding", "\"\"\" serialization_dir = Path(config_global[RUN_WORKING_DIR]) logger.info(\"Finding and loading model pipelines from disk.\") pipelines =", "for p in pipelines.values(): p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters) else: raise ValueError(\"Could not identify last pipeline step.\")", "split on underscores and pick the last value to obtain the document id", "/ \"pipeline\" / f\"trial_{trial.number:03}\") cv = RepeatedKFold(n_splits=cv_num_splits, n_repeats=cv_num_repeats, random_state=optimization_random_seed) f1_scores_cv = cross_val_score(estimator=pipeline, X=train_X,", "\"cluster_depth\": cluster_depth} return clustering_config def get_feature_extractors_config_with_all_and_defaults() -> Dict: \"\"\" Returns config section for", "logger: Logger) -> None: \"\"\" Trains n classifier+clustering pipelines with a given configuration.", "parameter must be a complete classifier configuration in the form of a dictionary.\")", "1e3 instances, values between 1e-7 and 1e-1 were tested, and 0.0015 # produced", "= config_pairs.pop(\"mpg_prediction\") if mpg_prediction_config is not None: logger.warning(\"'mpg_prediction' was specified for a mention", "cv_n_jobs = config_global[MAX_CORES] if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs)", "learning rate \"min_child_weight\": trial.suggest_float(\"min_child_weight\", 1, 10), # min required instance weight at a", "CLASSIFIER_PIPELINE_STEP_NAME: is_clustering_pipeline = False # collect mention pair scorer parameters if not \"pairs\"", "models and scorers :param config_data: :param config_evaluate: :param config_global: :param logger: :return: metrics", "\"alpha\": trial.suggest_loguniform(\"alpha\", 1e-7, 1e-1), 
\"max_iter\": 1000, \"early_stopping\": True, \"validation_fraction\": 0.1, \"n_iter_no_change\": 5}} elif", "Tuple import numpy as np import optuna import pandas as pd import seaborn", "config[\"classifier\"] = copy.deepcopy(classifier) config[\"clustering\"] = sample_clustering_config_with_optuna(trial) else: assert type(classifier) is str config[\"classifier\"] =", "_FIT_PARAMS: {\"early_stopping_rounds\": 5, \"eval_metric\": \"logloss\", \"validation_fraction\": 0.1, \"verbose\": False}} elif classifier_name == MLP:", "{num_models_to_train} separate models...\") jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)] pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs)", ":param logger: :return: \"\"\" serialization_dir = config_global[RUN_WORKING_DIR] eval_data_path = config_data[\"eval_data_path\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"]", "feature selection results plot_destination = serialization_dir / \"rfecv_plot.png\" ax = sns.lineplot(x=\"num-features\", y=\"weighted-f1\", data=df_grid_scores)", "f.write(\"\\n\".join(selected_features)) logger.info(\"Selected features: \" + \"\\n\".join(selected_features)) # collect scores df_grid_scores = [] for", "= config_data[\"doc_partitioning\"] oracle_mention_pair_generation = config_data[\"oracle_mention_pair_generation\"] train_X, train_y = get_X_and_y_for_pipeline(logger, train_data, doc_partitioning=doc_partitioning, oracle_mention_pair_generation=oracle_mention_pair_generation) base_pipeline_config" ]
[ "if cmd == \"show\": if os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\"))", "\"; Výsledek testu:\") ) if o is not None: rich_print(o) except: rich_print(\"Test skončil", "Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je `True`. done_on_enter: Pokud True,", "elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se", "False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}\"", "konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou", "(resp. ne relativně), což už je v pořádku. # Pozn.: Pokud někdo dumá", "cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try:", "máme asynchoní funkci, lepší řešení pro poslední řádku je: ``` await keyhandler(handler) ```", "RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None else lasttime", "- Warning; Pouze BakalářiAPI # 2 - Info; Pouze BakalářiAPI # 3 -", "(Již zobrazeno {offset} výsledků z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break def", "bude ignorovat, tudíž se brát v potaz pouze argumenty z příkazové řádky\", action=\"store_true\",", "done_on_enter: bool = True, mask_keyboard_interrupt: bool = False, ): \"\"\" Začne zaznamenávat zmáčklé", "last = current time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo \" + str(last)", "se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity =", "def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool,", "tb_install from urllib3.exceptions import 
InsecureRequestWarning # Takový hack na to, aby `bakalarishell` šel", "(i když k tomu nejspíše nikdy nedojde) # (a navíc alespoň nemusí řešit", "neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo", "None username: str | None = None password: str | None = None", ") if args.verbose < 4: for logger in [ logging.getLogger(name) for name in", "klávesy, které následně passuje do dané funkce. Args: handler: Funkce do které se", "prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False,", "0: print(\"Žádné schůzky v Lootingu, zkouším načíst ze serveru\") schuzky = fresh() length", "= getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\",", "else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\"", "print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove", "done: Funkce, která při zavolání ukončí záznam kláves. 
Pokud je `None`, nic se", "Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len =", "cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False", "parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato flaga přítomna, úkoly budou získány v 'pomalém", "`bakalarishell` šel spustit také přímo ze zdrojové složky # Pokud se `bakalarishell` spustí", "count_invalid = 0 try: while True: count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH,", "platformdirs import requests import rich from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import", "None] start: bool = True tasks: list[Task] = [ Task(\"Získání Komens zpráv\", task_komens,", "0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], )", "se zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x", "for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"] =", "parsed args = Args(**parsed) # Verbose: # 0 - Nic # 1 -", "který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test,", "args.password is None: try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo", "progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError", "print(f\"Data ze souboru '{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd = 
namespace[\"cmd\"]", ") parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat, tudíž se brát", "predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell(", "RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id)", "= cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] =", "not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\",", "\"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\",", "show(zprava, f\"*** Zpráva {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt:", "total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields, ) def finish(self): task = self.progress.tasks[self.task_id]", "f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m, t)() rich_print(", "print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity = (count_total - count_invalid) / count_total", "BakalářiAPI\", epilog=\"Ano, ano, ano... 
Actually je to web scraper, ale API zní líp", "as progress: threads: list[threading.Thread] = [] for task in tasks: thread = threading.Thread(", "with get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f) return parsed parser = argparse.ArgumentParser(", "help=\"Soubor s konfigurací se bude ignorovat, tudíž se brát v potaz pouze argumenty", "příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt):", ") first = True for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if", "predefined_commands = [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] =", "True) as f: parsed = json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell integrující", "v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace", "= f(*bound.args, **bound.kwargs) session.session.verify = False if login: session.login() return x return patched", "count += 1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool = True): shell_instance.stop_loop()", "schůzky v Lootingu, zkouším načíst ze serveru\") schuzky = fresh() length = len(schuzky)", "konfigurace není uložená\") elif cmd == \"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir()", "změnu nefugnuje při importu jako modul, jelikož v tom případě # hledá modul", "print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY ##### ################################################## def RunTest(ID:", "rich_print( \"Nebyly získány informace o stavu serveru, ale žádné funkce by tímto neměli", "timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first = False print(\"Dnešní a 
zítřejší schůzky:\") rich_print(", "None = None, advance: float | None = None, description: str | None", "return API.GetHomeworksIDs() def Test4(): print(\"Tento test již není podporován... Sadge\") return # return", "import InsecureRequestWarning # Takový hack na to, aby `bakalarishell` šel spustit také přímo", "message = f\"{text} Ano/Ne{'' if default is None else (' (Ano)' if default", "0 for ukol in ukoly: if ukol.done: hotove += 1 else: nehotove +=", "RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task:", "Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***\", highlight=False, color=\"yellow\", ) def", "studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError:", "None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut", "import logging import logging.config import os import threading import time import traceback import", "nikdy nedojde) # (a navíc alespoň nemusí řešit formátování při \"config show\") json.dump(args.__dict__,", "připojení k serveru. 
Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False)", "main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" # je složka), tak relativní `import` selže", "no_init: bool = False no_import: bool = False disable_config: bool = False commands:", "True for znamka in filter( lambda x: min(lasttime, today - timedelta(5)) < x.date1", "bool = True, mask_keyboard_interrupt: bool = False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy,", "len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující se počet záznamů", "předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}\" + (\"\" if note", "== \"save\": save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace", "aby `bakalarishell` šel spustit také přímo ze zdrojové složky # Pokud se `bakalarishell`", "= \"\", default: int | None = None): print(text, \"\" if default is", "is not None: parsed = from_config | parsed args = Args(**parsed) # Verbose:", "and ukol.done: continue cls() show( ukol, f\"*** Domácí úkol {count} z {hotove +", "get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if", "task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\",", ") shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command(", "# Porovnávání typ_mismatch = 0 id_len_mismatch = 0 id_mismatch = 0 print(\"=\" *", "%m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není", "opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import() -> bool: try: if", "import shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs(", "task = self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## ##### FUNKCE ##### ################################################## def rich_print(", "traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession", "color = \"red\" elif is_before and delta <= timedelta(minutes=5): color = \"yellow\" elif", "json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která jsou případně po JSONu, co jsme", "`bakalarishell` spustí jako modul (= přes `import`), tak vše proběhne v pořádku #", "except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti získáni, počet studentů je {length}\")", "\"Server není dostupný; Uložená data byla již importována, je tedy možné pracovat se", "modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def", "!= new_id_len: print( f\"Neshodující se počet záznamů pro typ {typ_old}! 
Old: {old_id_len}; New:", "rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not in", "ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\")", "shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command(", "importována, je tedy možné pracovat se starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt:", "if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol", "tak vše proběhne v pořádku # Pokud se ale spustí přes \"python main.py\"", "and not x.confirmed) or min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if", "create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys from rich.console import Console", "-> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool, mode: str =", "rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first = True", "action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze", "program se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací", "dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\"", "nebude proveden import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\",", "api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"]", "str | None = None, **kwargs, ): c = rich.get_console() if file is", "return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\") IDs = api._parse(", "+ str(last) + \" \" * 20, end=\"\\r\" ) # Some spaces to", "= None ) -> bool: message = f\"{text} Ano/Ne{'' if default is None", "False print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka", "Sadge\") return # return API.GetHomeworksIDs() def Test4(): print(\"Tento test již není podporován... Sadge\")", "cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool = False, force_fresh: bool =", "force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else", "old_id_len != new_id_len: print( f\"Neshodující se počet záznamů pro typ {typ_old}! 
Old: {old_id_len};", "default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command(", "nepřichází. Příklad: ``` def handler(keys_press: KeyPress, done: Callable): if key_press.key == \"q\": done()", "rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'}", "webbrowser from dataclasses import dataclass, field from datetime import datetime, timedelta from typing", "): \"\"\" Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce. Args:", "zobrazHotove and homework.Done: # continue # print(\"*** Domácí úkol ***\") # print(homework.Format()) #", "spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def", "i negativní if not is_before and delta >= timedelta(hours=-1): color = \"red\" elif", "není uložená\") elif cmd == \"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace", "class Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True tasks:", "dostupný; Uložená data byla již importována, je tedy možné pracovat se starými daty\",", "str | None = None username: str | None = None password: str", "not None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file:", "bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch = 0 id_mismatch", "browser: str | None = None executable_path: str | None = None verbose:", "\"N - Označí úkol jako nehotový\", \"Z - Zobrazí HTML úkolu\", ] )", "else \"; Výsledek testu:\") ) if o is not None: 
rich_print(o) except: rich_print(\"Test", "přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud", "Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ```", "key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif key == \"n\":", "if not args.no_init: successful_init = Init() if not args.no_import: try: with get_io_file(\"main\", False)", "short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]),", "ano/true/yes/1 / ne/false/no/0 if color is not None: rich_print(message, end=\"\", color=color) inpt =", "c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}' to show\")", "patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3`", ") shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True, )", "if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH,", "= 1 for znamka in znamky: try: show(znamka, f\"*** Známka {count} z {length}", "parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser,", "!= \"?\", api.looting.get(bakalariapi.Grade), ): if first: first = False print(\"Poslední známky:\") note =", "# \"Kopírování\" 
print(\"Vytváření kopie dat skrze export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting()", "Nic # 1 - Warning; Pouze BakalářiAPI # 2 - Info; Pouze BakalářiAPI", "`in` if not (\"disable_config\" in parsed): from_config = load_args_from_config() if from_config is not", "Pokud není tento argument přítomen, program se zeptá za běhu\", dest=\"password\", default=None, )", "= ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first = True for", "not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler is not None:", "pro daný argument načíst hodnotu z configu (protože hodnota z configu # se", "klávesy. Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy. done: Funkce, která při zavolání", "in threads: thread.join() print() autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\",", ") ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", )", "False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] =", "-> bool: message = f\"{text} Ano/Ne{'' if default is None else (' (Ano)'", "1, 1), datetime(9999, 12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs)", "from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys from rich.console import Console from", "= api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output", "bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = 
len(znamky)", "id_mismatch = 0 print(\"=\" * 30) print(f\"Počet typů v datech (old): {len(api.looting.data)}\") print(f\"Počet", "False commands: list[str] = field(default_factory=list) args: Args class RichTask: def __init__(self, progress: Progress,", "key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file:", "= Args(**parsed) # Verbose: # 0 - Nic # 1 - Warning; Pouze", "složku, jelikož neexistuje\") ################################################## ##### TESTY ##### ################################################## def RunTest(ID: int): m =", "použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který", "return False if args.url is None: try: args.url = input(\"URL adresa serveru: \")", "načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}' byla", "api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally:", "tudíž selže. if TYPE_CHECKING: from . import shell else: try: from . import", "fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy v Lootingu,", "%m. 
%Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\")", "\"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut na levelu %s (%s)\",", "True) as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané příkazy...\")", "def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None): if title is not", "open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True)", ") parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command(", "# cls() # for homework in homeworks: # if not zobrazHotove and homework.Done:", "homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové", "import Syntax from rich.traceback import install as tb_install from urllib3.exceptions import InsecureRequestWarning #", "\"\", default: bool | None = None, color: str | None = None", "Delta totiž může být očividně i negativní if not is_before and delta >=", "except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\") probrallity = (count_total", "se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato", "| None = None, visible: bool | None = None, refresh: bool =", "- Info # 5 - NOSET if args.verbose != 0: logging.basicConfig( level=[ None,", "bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti získáni, počet studentů je {length}\") try:", "not None: RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne `prepare_shell()` locals 
shell_instance.PYTHON_EXEC_LOCALS =", "Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad:", "False) as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if successful_init:", "f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze", "session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme", "poběží v omezeném módu.\\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu", "\"Z - Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done: Callable): key", "None: global args with get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f) return parsed", "rich_print( f\"Test {ID} skončil\" + (\"\" if o is None else \"; Výsledek", "not in args.commands and (not args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0,", "help=\"Test, který se má spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\",", "!= 0: if successful_init: print(\"Vykonávám zadané příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command)", "BakalářiAPI # 2 - Info; Pouze BakalářiAPI # 3 - Debug; Pouze BakalářiAPI", "< x.start_time and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first", "test již není podporován... 
Sadge\") return # homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\")", "KeyPress, done: Callable): key = key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif key", "# Pokud se `bakalarishell` spustí jako modul (= přes `import`), tak vše proběhne", "first = False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} -", "1 for id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ):", "title is not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí", ") else: print(\"Žádná konfigurace není uložená\") elif cmd == \"open\": dirname = os.path.dirname(config_path)", "# Indent, protože chci, aby to šlo přehledně upravit i z editoru (i", "jako modul (= přes `import`), tak vše proběhne v pořádku # Pokud se", "{nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count = 1", "or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count = 1 for ukol in ukoly:", ") parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky", "Keys.Enter: done() # elif key_press.key == Keys.F4: # for key_press in keys: #", "os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY #####", "parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser(", "filter( lambda x: (x.need_confirm and not x.confirmed) or min(lasttime, today - timedelta(5)) <", "isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") 
print_keys( [ (\"H - Označí úkol jako hotový\", \"\"", "0 count_invalid = 0 try: while True: count_total += 1 output = api.get_homeworks(", "TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] = [] for task in tasks: thread", "raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud", "datetime = datetime.max try: with get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read()) except", "dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int,", "dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if", "pass finally: le = len(error) print( f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy:", "is None: try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno,", "má spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato", "je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, )", "cls() count = 1 for zprava in zpravy: try: show(zprava, f\"*** Zpráva {count}", "přímo ze zdrojové složky # Pokud se `bakalarishell` spustí jako modul (= přes", ") parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser(", "x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect", "(hotové {hotove}, nehotové {nehotove})\") 
zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\")", "dataclasses import dataclass, field from datetime import datetime, timedelta from typing import IO,", "None: try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá", "= inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args,", "hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není", "%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k", "f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\",", "jako nehotový\", \"Z - Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done:", "\"\" ) ) with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0:", "na {komens.date1.strftime('%d. %m. 
%Y')}\" ) first = True for schuzka in filter( lambda", "bool = False no_init: bool = False no_import: bool = False disable_config: bool", "(typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch +=", "bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti)", "if default is None: continue return default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\")", "None, color: str | None = None ) -> bool: message = f\"{text}", "import rich from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import create_input from prompt_toolkit.key_binding", "for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str function: Callable[[bakalariapi.BakalariAPI,", "################################################## def rich_print( *objects: Any, sep: str = \" \", end: str =", "zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import() -> bool: try:", "!= typ_new: print(f\"Neshodující se typy! 
Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue", "30) print(f\"Počet typů v datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\")", "o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\",", "metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login", "cls() while offset < length: try: for _ in range(count): if offset >=", "konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o", "aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec,", "data vygenerována a zapsána do souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"): try:", "serveru\") schuzky = fresh() length = len(schuzky) if length == 0: print(\"Nebyly nalezeny", "(count_total - count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity)", "%m. 
%Y')}\" ) first = True for schuzka in filter( lambda x: today_aware", "c = rich.get_console() if file is None else Console(file=file) if color is not", "api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new: print( f\"Neshodující se ID!", "task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy =", "\"default=None\", jinak se neprofiltrují # a nelze pro daný argument načíst hodnotu z", "[] try: with Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for", "None] | None, *, done_on_enter: bool = True, mask_keyboard_interrupt: bool = False, ):", "False, color: str | None = None, **kwargs, ): c = rich.get_console() if", "Callable[[KeyPress, Callable[[], None]], None] | None, *, done_on_enter: bool = True, mask_keyboard_interrupt: bool", "= \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args: url: str | None =", "cmd == \"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace je uložená z", "prázdné heslo\", color=\"yellow\" ) args.password = \"\" api.password = args.password try: rich_print( f\"Kontrola", "force_fresh: schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné", "Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs:", "class Args: url: str | None = None username: str | None =", "PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně", "== \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj: 
bakalariapi.objects.BakalariObject, title:", "bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta color = \"\" #", "def get_io_file(file: str, create_file: bool, mode: str = \"r+\") -> IO: \"\"\"Vrátí file", "ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI", "\"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError,", "api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný zbývající čas: \" + str(last)", "bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta", "TESTY ##### ################################################## def RunTest(ID: int): m = __import__(__name__) t = f\"Test{ID}\" if", "| None = None ) -> bool: message = f\"{text} Ano/Ne{'' if default", "# print(\"*** Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování stiskni", "print(f\"Známky získány ({length}), zobrazuji...\") cls() count = 1 for znamka in znamky: try:", "[bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]'", "- Info; Pouze BakalářiAPI # 3 - Debug; Pouze BakalářiAPI # 4 -", "selže (\"ImportError: attempted relative import with no # known parent package\") a `shell`", "print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") # cls() def Test6(): count_total", "api.kill(nice) def Command_Export(file_name: str = \"main\"): 
print(\"Generace JSON dat...\") with get_io_file(file_name, True) as", "argparse) parsed = {k: v for k, v in vars(parser.parse_args()).items() if v is", "neměli být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo():", "threading.Thread( target=task.function, args=( api, RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ), ), ),", "ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a přihlašovací údaje jsou správné\", color=\"green\")", "is_before and delta <= timedelta(minutes=30): color = \"green\" print_keys( [(\"O - Otevře schůzku", ") subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", )", "try: rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, )", "except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return False if args.username is", "správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()`", "shell poběží v omezeném módu.\\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí", "při patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs)", "+ (\"\" if note == \"\" else f\" - {note}\") ) first =", "= [] for task in tasks: thread = threading.Thread( target=task.function, args=( api, RichTask(", "count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt:", "in args.commands and (not args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0,", "na levelu %s (%s)\", 
args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4: for logger", "1), datetime(9999, 12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka", "výsledků z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool =", "print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity", "inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs)", "[magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and", "open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\") elif cmd", "není dostupný; Uložená data byla již importována, je tedy možné pracovat se starými", "probrallity) ################################################## ##### MAIN ##### ################################################## def main(): global api global args def", "\"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\",", ") first = True for schuzka in filter( lambda x: today_aware < x.start_time", "load_args_from_config() -> dict | None: global args with get_io_file(CONFIG_FILE, True) as f: parsed", "False # logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None = None if args.browser", "a aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\",", "nalezeny žádné aktualní schůzky\") return cls() count = 1 for zprava in zpravy:", "action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat, tudíž", "== 
\"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif key == \"n\": obj.mark_as_done(api,", "text: str = \"\", default: bool | None = None, color: str |", "exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def save_config(): with", "f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. %Y')}\" ) first = True for schuzka", "if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key == \"p\":", "(z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v", "in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate = True", "import warnings import webbrowser from dataclasses import dataclass, field from datetime import datetime,", "int): m = __import__(__name__) t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\")", "přepnutí do online módu můžete zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( text:", "hotový\") elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif key", "if color is not None: rich_print(message, end=\"\", color=color) inpt = input() else: inpt", "IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59,", "MUSÍ mít \"default=None\", jinak se neprofiltrují # a nelze pro daný argument načíst", "dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna, nebude proveden import", "'-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se má spustit\",", ") parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", 
help=\"Pokud je tato flaga přítomna, úkoly", "\"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci", "load_args_from_config() if from_config is not None: parsed = from_config | parsed args =", "bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else \"green\")])", "shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True, ) )", "input_letter = inpt[0].lower() if input_letter in \"aty1\": return True if input_letter in \"nf0\":", "úspěšnosti je %.2f%%\" % probrallity) ################################################## ##### MAIN ##### ################################################## def main(): global", "\"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut na levelu %s (%s)\", args.verbose,", "total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0])", "args: Args class RichTask: def __init__(self, progress: Progress, task_id: TaskID) -> None: self.progress", "nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\" ) args.password = \"\" api.password =", "short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False, ) ) if __name__ == \"__main__\":", "progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def update( self, total: float |", "nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace", "Pokud `True`, tak `KeyboardInterrupt` bude potlačen. 
Pokud `False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve", "= \"_lasttime\" @dataclass class Args: url: str | None = None username: str", "task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api)", "0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count = 1 for zprava", "klávesy. done: Funkce, která při zavolání ukončí záznam kláves. Pokud je `None`, nic", "prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro", "): c = rich.get_console() if file is None else Console(file=file) if color is", "si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() ->", "BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] = [] for task", "na {ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first = True for znamka in filter(", "filtrujeme, tak pokud i po filtrování je \"disable_config\" # v \"parsed\" tak má", "< x.time, api.looting.get(bakalariapi.Komens), ): if first: first = False print(\"Komens zprávy:\") rich_print( f\"Komens", "# print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") # cls() def Test6(): count_total =", ") ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI,", "jako hotový\") elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif", "not None: rich_print(message, end=\"\", color=color) inpt = input() else: inpt = input(message) if", ") shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(", "= (count_total - count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\"", "dict | None: global args with get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f)", "asyncio.Event() inpt = create_input() done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press", "převádí na string, ale zatím to problém není, tak to neřeším eShrug objects", "= False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields,", "= \"green\" print_keys( [(\"O - Otevře schůzku v prohlížeči\", color), \"Z - Zobrazí", "force_fresh: bool = False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return []", "patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na `False` (v", "len(studenti) print(f\"Studenti získáni, 
počet studentů je {length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků", "= None, description: str | None = None, visible: bool | None =", "def RunTest(ID: int): m = __import__(__name__) t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji", "print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output = [\"Enter -", "fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil", "no_import: bool = False disable_config: bool = False commands: list[str] = field(default_factory=list) args:", "configu (protože hodnota z configu # se přepíše hodnotou \"None\" z argparse) parsed", "má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in` if not (\"disable_config\"", "Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj,", "True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]'", "Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved", "action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, )", "běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento", "\"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př. https://bakalari.skola.cz); Pokud není", "tak se funkce v parametru handler nevolá. 
mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude", "asyncio import getpass import inspect import json import logging import logging.config import os", "f\"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\"", "je: ``` await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt = create_input() done", "testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while True: last = session.get(", "import install as tb_install from urllib3.exceptions import InsecureRequestWarning # Takový hack na to,", "na `False` (v `requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## #####", "k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]'", "total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e:", "parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př. 
https://bakalari.skola.cz);", "\", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, )", "\"green\"), \"N - Označí úkol jako nehotový\", \"Z - Zobrazí HTML úkolu\", ]", "first = True for znamka in filter( lambda x: (x.need_confirm and not x.confirmed)", "shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import,", "output = [\"Enter - Pokračování\"] if enter_pokracovani else [] for key in keys:", "dostupný; Chce importovat uložená data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print(", "- count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\")", "is_before and delta >= timedelta(hours=-1): color = \"red\" elif is_before and delta <=", "api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la", "argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... 
Actually je to web scraper,", "'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\", default=None, )", "from_config = load_args_from_config() if from_config is not None: parsed = from_config | parsed", "else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol in filter(lambda x: not", "IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi import platformdirs import requests import rich", "z {hotove + nehotove if zobraz_hotove else nehotove} ***\", ) count += 1", "advance=advance, description=description, visible=visible, refresh=refresh, **fields, ) def finish(self): task = self.progress.tasks[self.task_id] task.finished_time =", "=> case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta", "= args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False if", "print(\"Konfigurace uložena\") elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else:", "= \"\\n\", file: IO[str] | None = None, flush: bool = False, color:", "CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args: url: str | None", "if key == \"o\": webbrowser.open(obj.join_url) elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()),", "start(self): self.progress.start_task(self.task_id) def update( self, total: float | None = None, completed: float", "přehledně upravit i z editoru (i když k tomu nejspíše nikdy nedojde) #", "\"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'\",", "dat skrze export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") #", 
"f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE, False) as f: lasttime", "jako modul, jelikož v tom případě # hledá modul `shell` jako \"globální\" modul", "být očividně i negativní if not is_before and delta >= timedelta(hours=-1): color =", "delta >= timedelta(hours=-1): color = \"red\" elif is_before and delta <= timedelta(minutes=5): color", "se naimportuje \"přímo\" (resp. ne relativně), což už je v pořádku. # Pozn.:", "shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command(", "for ukol in ukoly: try: if not zobraz_hotove and ukol.done: continue cls() show(", ") id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return", "dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační", "force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove =", "vše převádí na string, ale zatím to problém není, tak to neřeším eShrug", "= fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count = 1 for ukol", "datetime.max try: with get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass", "default: bool | None = None, color: str | None = None )", "for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh: schuzky = fresh()", "hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m, t)() rich_print( f\"Test {ID}", "type (new): {type(id_new)})\" ) id_mismatch += 1 
print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u", "(protože hodnota z configu # se přepíše hodnotou \"None\" z argparse) parsed =", "# Pozn.: Pokud někdo dumá nad tím, proč zde tedy není jen druhá", "else: rich_print( \"Server není dostupný; Uložená data byla již importována, je tedy možné", "highlight=False, ) try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return", "partial_init_mode() return False if args.username is None: try: args.username = input(\"Přihlašovací jméno: \")", "True, \"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print( \"Server není dostupný; Uložená data", "není jen druhá možnost, tak to je # kvůli tomu, že ta zase", "homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True)", "Command_Import() else: partial_init_mode() else: rich_print( \"Server není dostupný; Uložená data byla již importována,", "%.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z", "modul (ne jako \"lokální\" ve složce), tudíž selže. if TYPE_CHECKING: from . import", "nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data", "shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser()", "vše proběhne v pořádku # Pokud se ale spustí přes \"python main.py\" nebo", "print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. 
%Y')}\" ) first =", "složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path),", "False no_init: bool = False no_import: bool = False disable_config: bool = False", "# `cast()` protože jsem zatím nepřišel na způsob, jak dostat hint při patchování", "args.test is not None: RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne `prepare_shell()` locals", "= input() else: inpt = input(message) if len(inpt) == 0: if default is", ") count += 1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool = True):", "None = None, refresh: bool = False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed,", "| None = None browser: str | None = None executable_path: str |", "text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last", "lepší řešení pro poslední řádku je: ``` await keyhandler(handler) ``` \"\"\" evnt =", "handler na daný soubor `file` v uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if", "timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api:", "unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1)", "): if first: first = False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d.", "partial_init_mode() return False if args.url is None: try: args.url = input(\"URL adresa serveru:", "Známka {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: 
print(\"\\n\") break", "následně passuje do dané funkce. Args: handler: Funkce do které se passují zaznamenané", "úkol {count} z {hotove + nehotove if zobraz_hotove else nehotove} ***\", ) count", "bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api:", "do online módu můžete zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( text: str", "filter( lambda x: today_aware < x.start_time and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting),", "rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except", "shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands = [x for", "api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number", "length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count = 1", "x.done, api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta]", "done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) -> str:", "bool = False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy, které následně passuje do", "byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a přihlašovací údaje jsou", "from datetime import datetime, timedelta from typing import IO, TYPE_CHECKING, Any, Callable, cast", "0: print(\"Žádné zprávy v Lootingu, zkouším načíst ze serveru\") zpravy = fresh() length", "None = None, flush: bool = False, 
color: str | None = None,", "from dataclasses import dataclass, field from datetime import datetime, timedelta from typing import", "dialog_cislo(text: str = \"\", default: int | None = None): print(text, \"\" if", "main(): global api global args def load_args_from_config() -> dict | None: global args", "f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. %m.')} - {ukol.content}\" ) first = True for", "else f\"({default})\") while True: inpt = input() if not inpt: if default is", "fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky v Lootingu,", "selenium: bakalariapi.SeleniumHandler | None = None if args.browser is not None: selenium =", "\"Z - Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable): key =", "načtena\") def Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd", "# = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\")", "except KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu serveru, ale žádné funkce by", "in filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first: first =", "\"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api)", "and key_press.key == Keys.Enter: done() # elif key_press.key == Keys.F4: # for key_press", "m = __import__(__name__) t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try:", "def prepare_shell(): global shell_instance predefined_commands = [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals", "not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 
\"x\", encoding=\"utf-8\"): pass return open(path,", "progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for", "list[str] = field(default_factory=list) args: Args class RichTask: def __init__(self, progress: Progress, task_id: TaskID)", "Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již není podporován...", "\"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče,", "show(schuzka, f\"*** Schůzka {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt:", "short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se", "Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\",", "``` Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední řádku je: ```", "== Keys.Enter: done() # elif key_press.key == Keys.F4: # for key_press in keys:", "žádné aktualní schůzky\") return cls() count = 1 for zprava in zpravy: try:", ") id_len_mismatch += 1 for id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(),", "ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, )", "% probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových", ") ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato flaga přítomna,", 
"parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se", "spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument(", "True, tak se funkce v parametru handler nevolá. mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt`", "import shell else: try: from . import shell except ImportError: import shell tb_install(show_locals=True)", "shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky,", "\"), input(\"ID Studenta: \"), True) def Test5(): print(\"Tento test již není podporován... Sadge\")", "id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new:", "print(\"\\n\") break def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try: znamky = api.get_grades(", "count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return offset = 0", "a nelze pro daný argument načíst hodnotu z configu (protože hodnota z configu", "FUNKCE ##### ################################################## def rich_print( *objects: Any, sep: str = \" \", end:", "shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, ) ) parser =", "str | None = None password: str | None = None browser: str", "zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in", "kvůli tomu, že ta zase pro změnu nefugnuje při importu jako modul, jelikož", "údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) 
try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou", "Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError:", "if not zobraz_hotove and ukol.done: continue cls() show( ukol, f\"*** Domácí úkol {count}", "= 0 id_mismatch = 0 print(\"=\" * 30) print(f\"Počet typů v datech (old):", "zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count = 1 for", "# return API.GetHomeworksIDs() def Test4(): print(\"Tento test již není podporován... Sadge\") return #", "parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... Actually je to", ") first = True for znamka in filter( lambda x: (x.need_confirm and not", "None: continue return default input_letter = inpt[0].lower() if input_letter in \"aty1\": return True", "None) and not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat", "verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit:", "if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else:", "[] output = [] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání", "JSON dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data,", "příkazu 'init'.\", color=\"yellow\", ) def ask_import() -> bool: try: if args.no_import: if dialog_ano_ne(", "znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return", "na string, ale zatím to problém není, tak to neřeším eShrug objects =", "'Tuto akci nelze vykonat, jelikož shell se nachází v omezeném módu. 
Pro přepnutí", "return open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as f: # Indent,", "módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument(", "automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna,", "inpt = input() if not inpt: if default is None: continue return default", "`prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je", "first: first = False print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z", "potaz pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\",", "rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta color = \"\" # Delta", "dostat hint při patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound =", "output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <= 20: count_invalid", "dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+'", "RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print()", "z configu (protože hodnota z configu # se přepíše hodnotou \"None\" z argparse)", "for key_press in keys: if done_on_enter and key_press.key == Keys.Enter: done() # elif", "= API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové úkoly?\")", "def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads:", "get_io_file(CONFIG_FILE, True) as f: # Indent, protože chci, aby to šlo přehledně upravit", "upravit i z editoru (i když k tomu nejspíše nikdy nedojde) # (a", "# 5 - NOSET if args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\",", "handlers=[RichHandler()], ) logging.info( \"Logging zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if", "str = \" \", end: str = \"\\n\", file: IO[str] | None =", "(\"ImportError: attempted relative import with no # known parent package\") a `shell` se", "import datetime, timedelta from typing import IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi", "tak to je # kvůli tomu, že ta zase pro změnu nefugnuje při", "login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs) session.session.verify = False", "zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") # cls() # for homework in", "\"green\" print_keys( [(\"O - Otevře schůzku v prohlížeči\", color), \"Z - Zobrazí HTML", "Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace", 
"flaga přítomna, vynutí se získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser", "= False no_init: bool = False no_import: bool = False disable_config: bool =", "ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\") IDs", "parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př. https://bakalari.skola.cz); Pokud není tento", "\"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True, ) ) parser =", "tato flaga přítomna, nebude proveden import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None,", "rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m, t)() rich_print( f\"Test {ID} skončil\" +", "= False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané", "uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\": if", "někdy bude problém, že se vše převádí na string, ale zatím to problém", "_globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI", "25) except KeyboardInterrupt: return offset = 0 cls() while offset < length: try:", "= len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující se počet", "category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool: def partial_init_mode(): rich_print(", "##### TESTY ##### ################################################## def RunTest(ID: int): m = __import__(__name__) t = f\"Test{ID}\"", "druhá možnost, tak to je # kvůli tomu, že ta zase pro změnu", "shell else: try: from . 
import shell except ImportError: import shell tb_install(show_locals=True) cls", "delta <= timedelta(minutes=5): color = \"yellow\" elif is_before and delta <= timedelta(minutes=30): color", "and homework.Done: # continue # print(\"*** Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\")", "def ask_import() -> bool: try: if args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce", "relativně), což už je v pořádku. # Pozn.: Pokud někdo dumá nad tím,", "predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False)", "složky # Pokud se `bakalarishell` spustí jako modul (= přes `import`), tak vše", "ID type (new): {type(id_new)})\" ) id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb", "datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask):", "zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. 
%Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if", "šlo přehledně upravit i z editoru (i když k tomu nejspíše nikdy nedojde)", "elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H - Označí úkol jako hotový\",", "`requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ##################################################", "Info # 5 - NOSET if args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\",", "RichTask: def __init__(self, progress: Progress, task_id: TaskID) -> None: self.progress = progress self.task_id", "\"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str", "str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat", "nehotove += 1 if hotove + nehotove == 0: print(\"Nebyly nalezeny žádné aktualní", "args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False if not", "úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\",", "{komens.date1.strftime('%d. %m. %Y')}\" ) first = True for schuzka in filter( lambda x:", "= api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks(", "funkce. Args: handler: Funkce do které se passují zaznamenané klávesy. 
Bere 2 argumenty:", "se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser,", "return False def dialog_cislo(text: str = \"\", default: int | None = None):", "bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H - Označí", "auto_run: bool = False no_init: bool = False no_import: bool = False disable_config:", "f\"*** Domácí úkol {count} z {hotove + nehotove if zobraz_hotove else nehotove} ***\",", "default is None else (' (Ano)' if default else ' (Ne)')}: \" while", "key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type", "potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož", "is not None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def", "= api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length", "print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(),", "or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = ( datetime.now()", "1 for schuzka in schuzky: try: show(schuzka, f\"*** Schůzka {count} z {length} ***\")", "Args: url: str | None = None username: str | None = None", "mít \"default=None\", jinak se neprofiltrují # a nelze pro daný argument načíst hodnotu", "= progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def update( self, total: float", 
"bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove", "získány ({length}), zobrazuji...\") cls() count = 1 for znamka in znamky: try: show(znamka,", "filter( lambda x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade != \"?\",", "bool: try: if args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce importovat uložená data?\",", "zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None", "Keys.ControlC: raise KeyboardInterrupt elif handler is not None: handler(key_press, done) with inpt.raw_mode(): with", "\"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] = [] for task in tasks:", "roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args: url: str", "file) def get_io_file(file: str, create_file: bool, mode: str = \"r+\") -> IO: \"\"\"Vrátí", "# Chceme `main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell", "příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny", "zapsána do souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name, False)", "Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now()", "break last = current time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo \" +", "0 - Nic # 1 - Warning; Pouze BakalářiAPI # 2 - Info;", ") cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool = False, force_fresh: bool", "key_press.key == Keys.F4: # for key_press in keys: # if key_press.key == Keys.Escape:", "Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser()", "args.password = \"\" api.password = args.password try: rich_print( f\"Kontrola stavu serveru a přihlašovacích", "done_on_enter and key_press.key == Keys.Enter: done() # elif key_press.key == Keys.F4: # for", "flaga přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\",", "rich.traceback import install as tb_install from urllib3.exceptions import InsecureRequestWarning # Takový hack na", "`True`, tak `KeyboardInterrupt` bude potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti", "is None else (' (Ano)' if default else ' (Ne)')}: \" while True:", "je `None`, nic se nevolá. 
Hodnota `None` má smysl pouze pokud parametr `done_on_enter`", ").get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh: schuzky", "else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, )", "dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser", "způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0])", "and delta <= timedelta(minutes=5): color = \"yellow\" elif is_before and delta <= timedelta(minutes=30):", "= shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True )", "task.finished_time = 0 ################################################## ##### FUNKCE ##### ################################################## def rich_print( *objects: Any, sep:", "str], enter_pokracovani=True): output = [\"Enter - Pokračování\"] if enter_pokracovani else [] for key", "== \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo, jelikož", "as f: # Indent, protože chci, aby to šlo přehledně upravit i z", "print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\") probrallity = (count_total - count_invalid) /", "%Y')}\" + (\"\" if note == \"\" else f\" - {note}\") ) first", "for logger in [ logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue", "self.progress.update( self.task_id, total=total, completed=completed, advance=advance, 
description=description, visible=visible, refresh=refresh, **fields, ) def finish(self): task", "choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který", "if last < current: print(\"\\n\") break last = current time.sleep(1) print( \"Sezení bylo", "True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == ''", "print(\"=\" * 30) print(f\"Počet typů v datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech", "os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace není uložená\")", "rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True,", "se má spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je", "target=task.function, args=( api, RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ), ), ), )", "hledá modul `shell` jako \"globální\" modul (ne jako \"lokální\" ve složce), tudíž selže.", "číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False,", "typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online", "znamky: try: show(znamka, f\"*** Známka {count} z {length} ***\") count += 1 cls()", "as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje\",", "možnost, tak to je # kvůli tomu, že ta zase pro změnu nefugnuje", "schůzek získany ({la})\") print() error: 
list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as progress:", "if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] = [] with Progress() as progress:", "): Command_Import() else: partial_init_mode() else: rich_print( \"Server není dostupný; Uložená data byla již", "= len(error) print( f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\"", "dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna, nebude BakalariAPI", "jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def handler(keys_press: KeyPress, done: Callable):", "def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime", "nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not in args.commands and", "není podporován... 
Sadge\") return # return API.GetHomeworksIDs() def Test4(): print(\"Tento test již není", "autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\"", "bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task:", "None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved:", "f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum", "\"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max", "rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím", "id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api,", "20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů", "RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\",", "minute=0, second=0, microsecond=0) ) first = True for znamka in filter( lambda x:", "output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str |", "x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\") 
ukol._sort_by_date rich_print( f\"Z", "allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in", "{znamka.date1.strftime('%d. %m. %Y')}\" + (\"\" if note == \"\" else f\" - {note}\")", "\" \", end: str = \"\\n\", file: IO[str] | None = None, flush:", "prepare_shell(): global shell_instance predefined_commands = [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals =", "print(\"\\n\\n\") print_keys( [ (\"H - Označí úkol jako hotový\", \"\" if obj.done else", "try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands = [x", "and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler is not None: handler(key_press, done)", "except KeyboardInterrupt: partial_init_mode() return False if args.url is None: try: args.url = input(\"URL", "jméno; Pokud není tento argument přítomen, program se zeptá za běhu\", dest=\"username\", nargs=\"?\",", "která při zavolání ukončí záznam kláves. Pokud je `None`, nic se nevolá. Hodnota", "False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields, )", "if file is None else Console(file=file) if color is not None: # Pravděpodobně", "Pravděpodobně někdy bude problém, že se vše převádí na string, ale zatím to", "try: from . 
import shell except ImportError: import shell tb_install(show_locals=True) cls = shell.cls", "začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool:", "není, tak to neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects,", "selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init", "for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length =", "a tím i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\",", "(Ne)')}: \" while True: # ano/true/yes/1 / ne/false/no/0 if color is not None:", "parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí", "\"r+\") -> IO: \"\"\"Vrátí file handler na daný soubor `file` v uživatelské (data)", "spustit také přímo ze zdrojové složky # Pokud se `bakalarishell` spustí jako modul", "def Command_Komens(limit: int | None = None, force_fresh: bool = False): def fresh()", "float | None = None, description: str | None = None, visible: bool", "(znamka.need_confirm and not znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE, True) as f:", "nalezeny žádné aktualní schůzky\") return cls() count = 1 for schuzka in schuzky:", "str = \"main\"): try: with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print(", ") lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE, False) as f: lasttime =", "1s) do konce a bylo 
prodlouženo na \" + str(current) ) except KeyboardInterrupt:", "if first: first = False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m.", "verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu serveru,", "i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True,", "zprávy\", \"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key", "int | None = None): print(text, \"\" if default is None else f\"({default})\")", "stisk klávesy. done: Funkce, která při zavolání ukončí záznam kláves. Pokud je `None`,", "f: parsed = json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\",", "first = True for schuzka in filter( lambda x: today_aware < x.start_time and", "-> None: self.progress = progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def update(", "partial_init_notice() return [] output = [] with Progress() as progress: task = RichTask(", "return length = len(studenti) print(f\"Studenti získáni, počet studentů je {length}\") try: count =", "Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky,", "jsem zatím nepřišel na způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u)", "autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty pro argparse", "bakalariapi import platformdirs import requests import rich from bakalariapi.utils import cs_timedelta, parseHTML from", "Init() if not args.no_import: try: with get_io_file(\"main\", False) as f: 
api.looting.import_data(json.loads(f.read())) except FileNotFoundError:", "Keys.F4: # for key_press in keys: # if key_press.key == Keys.Escape: # raise", "= \"\" api.password = args.password try: rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů", "modul `shell` jako \"globální\" modul (ne jako \"lokální\" ve složce), tudíž selže. if", "informace o stavu serveru, ale žádné funkce by tímto neměli být ovlivněny\", color=\"yellow\",", "\"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost'", "for schuzka in schuzky: try: show(schuzka, f\"*** Schůzka {count} z {length} ***\") count", "nehotove if zobraz_hotove else nehotove} ***\", ) count += 1 except KeyboardInterrupt: print(\"\\n\")", "str = \"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f,", "schůzku v prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress,", "session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last <", "if from_config is not None: parsed = from_config | parsed args = Args(**parsed)", "False if login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ )", "(shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands = [x for x in", "[] for key in keys: if isinstance(key, tuple): if key[1] == \"\": output.append(key[0])", "get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(),", ") # Some spaces to rewrite previous text... 
session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current =", "time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo \" + str(last) + \" (+", "api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length =", "= True for schuzka in filter( lambda x: today_aware < x.start_time and x.start_time", "složka), tak relativní `import` selže (\"ImportError: attempted relative import with no # known", "try: if not zobraz_hotove and ukol.done: continue cls() show( ukol, f\"*** Domácí úkol", "adresa serveru\", color=\"red\") partial_init_mode() return False if args.username is None: try: args.username =", "{count_total}\") probrallity = (count_total - count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti je", "bool = False disable_config: bool = False commands: list[str] = field(default_factory=list) args: Args", "if args.password is None: try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo", "for id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if", "t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m,", "\"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument(", "tom případě # hledá modul `shell` jako \"globální\" modul (ne jako \"lokální\" ve", "print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") # cls() def Test6(): count_total = 0", "'{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}' byla načtena\") def Command_Config(namespace:", "tasks: thread = threading.Thread( target=task.function, args=( api, RichTask( progress, 
progress.add_task( task.description, start=task.start, total=0", "indent=4) def disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs): # `cast()` protože jsem", "= Init() if not args.no_import: try: with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except", "continue old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující", ") return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\") IDs =", "Test5(): print(\"Tento test již není podporován... Sadge\") return # homeworks = API.GetHomeworks() #", "input() if not inpt: if default is None: continue return default if inpt.isdecimal():", "verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else", "metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True,", "`shell` se naimportuje \"přímo\" (resp. ne relativně), což už je v pořádku. #", "api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE =", "False no_import: bool = False disable_config: bool = False commands: list[str] = field(default_factory=list)", "encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as f:", "args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False if args.password", "\"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! 
***\", highlight=False, color=\"yellow\", )", "údaje jsou neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if", "except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name:", "< x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first: first = False", "key_handler_proc(keys: list[KeyPress]): for key_press in keys: if done_on_enter and key_press.key == Keys.Enter: done()", "if color is not None: # Pravděpodobně někdy bude problém, že se vše", "Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] = []", "task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str", "\"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" # je složka), tak relativní `import`", "o serveru\", ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato", "současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje", "short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\",", "tato flaga přítomna, úkoly budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, )", "command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\",", "except FileNotFoundError: 
pass if args.test is not None: RunTest(args.test) prepare_shell() # Chceme `main()`", "x: today_aware < x.start_time and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if", "Debug; Pouze BakalářiAPI # 4 - Info # 5 - NOSET if args.verbose", "for thread in threads: thread.join() print() autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli", "True) as f: # Indent, protože chci, aby to šlo přehledně upravit i", "zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se typy! Old: '{typ_old}'; New: '{typ_new}'\")", "default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat, tudíž se", "args.commands and (not args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)", "pokud byl delší jak náš současný) f.truncate() print(f\"JSON data vygenerována a zapsána do", ":)\", ) if parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na", "handler is not None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait()", "global args def load_args_from_config() -> dict | None: global args with get_io_file(CONFIG_FILE, True)", "cast import bakalariapi import platformdirs import requests import rich from bakalariapi.utils import cs_timedelta,", "timedelta(minutes=5): color = \"yellow\" elif is_before and delta <= timedelta(minutes=30): color = \"green\"", "`done_on_enter` je `True`. 
done_on_enter: Pokud True, tak se při klávese Enter ukončí záznam", "pass return open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as f: #", "z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool = False,", "prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable):", "rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a přihlašovací údaje", "length = len(zpravy) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return", "Úkolu: \"), input(\"ID Studenta: \"), True) def Test5(): print(\"Tento test již není podporován...", "TaskID) -> None: self.progress = progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def", ") def ask_import() -> bool: try: if args.no_import: if dialog_ano_ne( \"Server není dostupný;", "return Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException: return", "+ (\"\" if o is None else \"; Výsledek testu:\") ) if o", "\\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)):", "* 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\")", "běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument přítomen,", "if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh", "password: str | None = None browser: str | None = None executable_path:", "ze serveru\") schuzky = fresh() length = len(schuzky) if length == 0: print(\"Nebyly", "task in 
tasks: thread = threading.Thread( target=task.function, args=( api, RichTask( progress, progress.add_task( task.description,", "je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který", "Callable): key = key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako", "# se přepíše hodnotou \"None\" z argparse) parsed = {k: v for k,", ") shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser =", "api global args def load_args_from_config() -> dict | None: global args with get_io_file(CONFIG_FILE,", "args.no_init: successful_init = Init() if not args.no_import: try: with get_io_file(\"main\", False) as f:", "is not None: # Pravděpodobně někdy bude problém, že se vše převádí na", "o stavu serveru, ale žádné funkce by tímto neměli být ovlivněny\", color=\"yellow\", )", "raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\")", "lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI,", "if args.browser is not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api =", "False if args.password is None: try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print(", "return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server", "fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False,", "0 print(\"=\" * 30) print(f\"Počet typů v datech (old): 
{len(api.looting.data)}\") print(f\"Počet typů v", "import json import logging import logging.config import os import threading import time import", "v for k, v in vars(parser.parse_args()).items() if v is not None} # Jelikož", "náš současný) f.truncate() print(f\"JSON data vygenerována a zapsána do souboru '{file_name}'\") def Command_Import(file_name:", "description: str | None = None, visible: bool | None = None, refresh:", "output if force_fresh: zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) ==", "záznam kláves. Pokud je `None`, nic se nevolá. Hodnota `None` má smysl pouze", "= RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID)", "# pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H - Označí úkol", "***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool =", "color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if args.url is None: try:", "IO[str] | None = None, flush: bool = False, color: str | None", "in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new: print( f\"Neshodující", "f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is not None: RunTest(args.test) prepare_shell() #", "parseHTML from prompt_toolkit.input import create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys", "print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except", "\"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined 
type '{type(obj)}' to show\") async def keyhandler( handler:", "if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti", "default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, # =>", "v in vars(parser.parse_args()).items() if v is not None} # Jelikož hodnoty filtrujeme, tak", "je tato flaga přítomna, nebude proveden import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\",", "| parsed args = Args(**parsed) # Verbose: # 0 - Nic # 1", "= None if args.browser is not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, )", "Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\",", "f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m.", "else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str | None", "rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta =", "argumenty: key_press: Zaznamenaný stisk klávesy. 
done: Funkce, která při zavolání ukončí záznam kláves.", "na daný soubor `file` v uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if not", "kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu", "najednou?\", 25) except KeyboardInterrupt: return offset = 0 cls() while offset < length:", "##### ################################################## def rich_print( *objects: Any, sep: str = \" \", end: str", "# if not zobrazHotove and homework.Done: # continue # print(\"*** Domácí úkol ***\")", "= f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m, t)()", "= [] try: with Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la))", "else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession )", "zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4: for", "today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0,", "== 0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové", "``` def handler(keys_press: KeyPress, done: Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ```", "len(IDs) print(f\"IDčka online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with", "1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z", "unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy = fresh() else:", "\" + 
str(last) + \" \" * 20, end=\"\\r\" ) # Some spaces", "successful_init = Init() if not args.no_import: try: with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read()))", "asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys(", "try: with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is", "False) print(\"Úkol označen jako nehotový\") elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()),", ".astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first = True for znamka in filter(", "* 20, end=\"\\r\" ) # Some spaces to rewrite previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND)", "uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\")", "+ \" (+ max 1s) do konce a bylo prodlouženo na \" +", "as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt:", "subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\",", "dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí shellu (po", "return False if args.password is None: try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt:", "file handler na daný soubor `file` v uživatelské (data) složce.\"\"\" path = get_io_filepath(file)", "== \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}'", "else 
bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH,", "import bakalariapi import platformdirs import requests import rich from bakalariapi.utils import cs_timedelta, parseHTML", "klávasu...\") # cls() def Test6(): count_total = 0 count_invalid = 0 try: while", "0 try: while True: count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False,", "print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0,", "help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s", "if login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) #", "in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length,", ") as progress: threads: list[threading.Thread] = [] for task in tasks: thread =", ") shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers =", "f\"Test {ID} skončil\" + (\"\" if o is None else \"; Výsledek testu:\")", "= \" \", end: str = \"\\n\", file: IO[str] | None = None,", "str | None = None, visible: bool | None = None, refresh: bool", "return cls() count = 1 for schuzka in schuzky: try: show(schuzka, f\"*** Schůzka", "as session: try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while", "(př. 
https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá za běhu\", nargs=\"?\",", "parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí", "for command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli", "print(\"\\n\") break def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str =", "str(last) + \" \" * 20, end=\"\\r\" ) # Some spaces to rewrite", "except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0():", "= None auto_run: bool = False no_init: bool = False no_import: bool =", "f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol in filter(lambda x: not x.done,", "dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k serveru. 
Chcete pokračovat s nezabezpečeným připojením?\",", "| None = None, **kwargs, ): c = rich.get_console() if file is None", "\"Sezení bylo prodlouženo, když zbývalo \" + str(last) + \" (+ max 1s)", "Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání", "parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\",", "k tomu nejspíše nikdy nedojde) # (a navíc alespoň nemusí řešit formátování při", "color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo se", "as progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved =", "AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") # cls() # for homework in homeworks: #", "session: try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True:", "zpráv, které se načtou a tím i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\",", "při importu jako modul, jelikož v tom případě # hledá modul `shell` jako", "aktualní schůzky\") return cls() count = 1 for schuzka in schuzky: try: show(schuzka,", "print(f\"Studenti získáni, počet studentů je {length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\",", "# Takový hack na to, aby `bakalarishell` šel spustit také přímo ze zdrojové", "return default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] |", "print_keys( [ (\"H - Označí úkol jako hotový\", \"\" if obj.done else \"green\"),", ") parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna, nebude proveden import dat (z", 
"api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current: print(\"\\n\") break last = current time.sleep(1)", "api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <= 20: count_invalid += 1", "appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args:", "dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]'", "Jelikož hodnoty filtrujeme, tak pokud i po filtrování je \"disable_config\" # v \"parsed\"", "print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as progress: task = RichTask(progress,", "default=None, ) # Všechny argumenty pro argparse MUSÍ mít \"default=None\", jinak se neprofiltrují", "partial_init_mode() else: rich_print( \"Server není dostupný; Uložená data byla již importována, je tedy", "rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals,", "v parametru handler nevolá. mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen. Pokud `False`,", "Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax from rich.traceback import install as tb_install", "`None`, nic se nevolá. 
Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je", "logging import logging.config import os import threading import time import traceback import warnings", "vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace není uložená\") elif cmd == \"check\":", ") with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init:", "argparse MUSÍ mít \"default=None\", jinak se neprofiltrují # a nelze pro daný argument", "= shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se načtou", "is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol in filter(lambda", "False): print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH )", "Indent, protože chci, aby to šlo přehledně upravit i z editoru (i když", "Pouze BakalářiAPI # 4 - Info # 5 - NOSET if args.verbose !=", "new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch =", "return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool, mode: str = \"r+\") ->", "ve složce), tudíž selže. if TYPE_CHECKING: from . 
import shell else: try: from", "'{file_name}'\") def Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read()))", "get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor", "je tato flaga přítomna, neprovede se odlášení sessionů a aplikace se tedy rychleji", "Otevře schůzku v prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press:", "Callable): key = key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif key == \"z\":", "problém není, tak to neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return", "obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new: print(", ") parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, # => case-insensitive", "soubor `file` v uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path): if", "None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo", "vygenerována a zapsána do souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"): try: with", "None, description: str | None = None, visible: bool | None = None,", "f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze", "fungovat správně! 
***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int | None = None,", "daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser", "isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else", ") parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument přítomen, program se", "\"\" else f\" - {note}\") ) first = True for komens in filter(", "1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len: print(", "def __init__(self, progress: Progress, task_id: TaskID) -> None: self.progress = progress self.task_id =", "list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] = [] with Progress() as", "# dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna,", "\"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí se získání dat ze serveru\",", "from rich.console import Console from rich.logging import RichHandler from rich.progress import BarColumn, Progress,", "if typ_old != typ_new: print(f\"Neshodující se typy! 
Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch +=", "rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký", "timedelta(5)) < x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first: first =", "bool = False, color: str | None = None, **kwargs, ): c =", "key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler is not None: handler(key_press, done) with", "= args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return False if", ") unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1)", "None: try: args.username = input(\"Přihlašovací jméno: \") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo", "max 1s) do konce a bylo prodlouženo na \" + str(current) ) except", "print(\"Nic se nevykonalo, jelikož konfigurace není uložená\") elif cmd == \"check\": if os.path.exists(config_path):", "api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False", "################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace", "argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument(", "subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\",", "uloženou 
konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\",", "= self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## ##### FUNKCE ##### ################################################## def rich_print( *objects:", "advance: float | None = None, description: str | None = None, visible:", "\"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí", "elif key_press.key == Keys.F4: # for key_press in keys: # if key_press.key ==", "in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\",", "if api.is_partial_init: partial_init_notice() return [] output = [] with Progress() as progress: task", "keys: if done_on_enter and key_press.key == Keys.Enter: done() # elif key_press.key == Keys.F4:", "help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní", "== 0: if default is None: continue return default input_letter = inpt[0].lower() if", "rich_print(message, end=\"\", color=color) inpt = input() else: inpt = input(message) if len(inpt) ==", "get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool, mode:", "pro větší 'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s", "import shell except ImportError: import shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance:", "partial_init_notice() return hotove = 0 nehotove = 0 for ukol in ukoly: if", "f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. 
%Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\"", "id_new: print( f\"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type", "dict[str, Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\": if", "(data) složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path): if not create_file: raise FileNotFoundError()", "print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if", ") try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands =", "spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\",", "c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}' to show\") async def keyhandler(", "if args.url is None: try: args.url = input(\"URL adresa serveru: \") api.server_info.url =", "return True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type ==", "None: self.progress = progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def update( self,", ") ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]]", "složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\",", "if parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př.", "KeyboardInterrupt: pass finally: le = len(error) print( f\"Úspěšné pokusy: {la - le}; 
Neúspěšné", "protože jsem zatím nepřišel na způsob, jak dostat hint při patchování metod (pomocí", "se při klávese Enter ukončí záznam kláves. Pozn.: Pokud True, tak se funkce", "id_old != id_new: print( f\"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old};", "[bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}\" + (\"\" if note == \"\" else", "api.looting.get(bakalariapi.Komens), ): if first: first = False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od", "ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly =", "args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = (", "api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] = [] with Progress() as progress: task", "locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je \"", "SystemExit elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler is", ") try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return False", "- {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True", "# hledá modul `shell` jako \"globální\" modul (ne jako \"lokální\" ve složce), tudíž", "můžeme dotazovat (jen) přes `in` if not (\"disable_config\" in parsed): from_config = load_args_from_config()", "[] for task in tasks: thread = threading.Thread( target=task.function, args=( api, RichTask( progress,", "\"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut na levelu", "None = None, **kwargs, ): c = rich.get_console() if 
file is None else", "to show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] | None, *,", "None = None verbose: int = 0 test: int | None = None", "= 1 for ukol in ukoly: try: if not zobraz_hotove and ukol.done: continue", "přítomna, nebude proveden import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument(", "task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task:", "partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if args.url is None: try: args.url =", "typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se typy! Old:", "bakalariapi.sessions.RequestsSession ) as session: try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\",", "v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\", default=None,", "když zbývalo \" + str(last) + \" (+ max 1s) do konce a", "key in keys: if isinstance(key, tuple): if key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\")", "output: list[bakalariapi.Komens] = [] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání", "homeworks: # if not zobrazHotove and homework.Done: # continue # print(\"*** Domácí úkol", "while True: # ano/true/yes/1 / ne/false/no/0 if color is not None: rich_print(message, end=\"\",", "konfigurace není uložená\") elif cmd == \"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print(", "unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None else lasttime -", "is None else f\"({default})\") while True: inpt = input() if not inpt: 
if", "{length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool = False, force_fresh:", "= api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None else lasttime - timedelta(5),", "\"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif key == \"n\": obj.mark_as_done(api, False)", "None else \"; Výsledek testu:\") ) if o is not None: rich_print(o) except:", "str = \"\\n\", file: IO[str] | None = None, flush: bool = False,", "input(\"URL adresa serveru: \") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\",", "task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as", "dialog_ano_ne( \"Server není dostupný; Chce importovat uložená data?\", True, \"yellow\", ): Command_Import() else:", "count = 1 for schuzka in schuzky: try: show(schuzka, f\"*** Schůzka {count} z", ") if \"exit\" not in args.commands and (not args.no_import or args.auto_run): print() today", "else: print(f\"Data ze souboru '{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd =", "nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument přítomen, program se zeptá za běhu\",", "- {note}\") ) first = True for komens in filter( lambda x: x.grade", "+= 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool = False): print(\"Získávám", "and delta >= timedelta(hours=-1): color = \"red\" elif is_before and delta <= timedelta(minutes=5):", "elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif key ==", "{'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k", 
"short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí", "shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE", "api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is None) and", "elif cmd == \"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname))", "flaga přítomna, neprovede se odlášení sessionů a aplikace se tedy rychleji ukončí\", action=\"store_false\",", "not zobraz_hotove and ukol.done: continue cls() show( ukol, f\"*** Domácí úkol {count} z", "args.username = input(\"Přihlašovací jméno: \") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací", "k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k", "print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu...\" ) return Init() else:", "current: print(\"\\n\") break last = current time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo", "print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch = 0 id_mismatch = 0", "Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"), True) def Test5():", "přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def", "while True: inpt = input() if not inpt: if default is None: continue", "Args: handler: Funkce do které se passují zaznamenané klávesy. 
Bere 2 argumenty: key_press:", "rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try:", "bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID)", "byl delší jak náš současný) f.truncate() print(f\"JSON data vygenerována a zapsána do souboru", "příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( text: str = \"\", default: bool |", "parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí se automatické úlohy\", action=\"store_true\",", "https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá za běhu\", nargs=\"?\", default=None,", "== \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př. https://bakalari.skola.cz); Pokud", "TaskID, TimeRemainingColumn from rich.syntax import Syntax from rich.traceback import install as tb_install from", "from rich.syntax import Syntax from rich.traceback import install as tb_install from urllib3.exceptions import", "Pro přepnutí do online módu můžete zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne(", "task.update(advance=1) return output if force_fresh: schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if", ">= length: break print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování stiskni klávasu... (Již", "None = None browser: str | None = None executable_path: str | None", "no # known parent package\") a `shell` se naimportuje \"přímo\" (resp. 
ne relativně),", "| None = None auto_run: bool = False no_init: bool = False no_import:", "else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy v Lootingu, zkouším", "== \"show\": if os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná", "if os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není", "= shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands,", ") parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga přítomna, neprovede", "task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh: schuzky =", "+ nehotove == 0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové", "rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def", "+ timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first = False print(\"Dnešní a zítřejší schůzky:\")", "shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\",", "thread.start() threads.append(thread) for thread in threads: thread.join() print() autorun() else: rich_print( \"Autorun nebyl", "= api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59)", "handler nevolá. 
mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen. Pokud `False`, `KeyboardInterrupt` bude", "inpt: if default is None: continue return default if inpt.isdecimal(): return int(inpt) print(\"Špatná", "None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol in filter(lambda x:", "zpravy: try: show(zprava, f\"*** Zpráva {count} z {length} ***\") count += 1 cls()", "try: show(schuzka, f\"*** Schůzka {count} z {length} ***\") count += 1 cls() except", "bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs) session.session.verify =", "je {s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"open\": dirname", "se neprofiltrují # a nelze pro daný argument načíst hodnotu z configu (protože", "dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato", "False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne", "FileNotFoundError: pass if args.test is not None: RunTest(args.test) prepare_shell() # Chceme `main()` locals,", "break def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH", "= input() if not inpt: if default is None: continue return default if", "key_press in keys: if done_on_enter and key_press.key == Keys.Enter: done() # elif key_press.key", "f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2():", "zde tedy není jen druhá možnost, tak to je # kvůli tomu, že", "bool = True tasks: list[Task] = [ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání", "JSONu, pokud byl delší jak náš současný) 
f.truncate() print(f\"JSON data vygenerována a zapsána", "2 argumenty: key_press: Zaznamenaný stisk klávesy. done: Funkce, která při zavolání ukončí záznam", "bude problém, že se vše převádí na string, ale zatím to problém není,", "input_letter in \"aty1\": return True if input_letter in \"nf0\": return False def dialog_cislo(text:", "tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí", "(a navíc alespoň nemusí řešit formátování při \"config show\") json.dump(args.__dict__, f, indent=4) def", "známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), )", "args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné", "if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler |", "with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která jsou", "done: Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif", "input(message) if len(inpt) == 0: if default is None: continue return default input_letter", "je tato flaga přítomna, vynutí se získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\",", "disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu...\" ) return", ") ) with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if", ") 
CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args: url: str |", "ukoly: try: if not zobraz_hotove and ukol.done: continue cls() show( ukol, f\"*** Domácí", "except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1)", "if default else ' (Ne)')}: \" while True: # ano/true/yes/1 / ne/false/no/0 if", "+= 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <=", "None = None): print(text, \"\" if default is None else f\"({default})\") while True:", "úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna,", "`None` má smysl pouze pokud parametr `done_on_enter` je `True`. done_on_enter: Pokud True, tak", "Pokud se `bakalarishell` spustí jako modul (= přes `import`), tak vše proběhne v", "end=\"\") while True: print( \"Současný zbývající čas: \" + str(last) + \" \"", "ignorovat, tudíž se brát v potaz pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\",", "klávasu... 
(Již zobrazeno {offset} výsledků z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break", "api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None else lasttime - timedelta(5), )", "with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\")", "# je složka), tak relativní `import` selže (\"ImportError: attempted relative import with no", "color=\"yellow\", ) def dialog_ano_ne( text: str = \"\", default: bool | None =", "= None, color: str | None = None ) -> bool: message =", "bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H - Označí úkol jako hotový\", \"\" if", "done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední řádku", "\" + str(last) + \" (+ max 1s) do konce a bylo prodlouženo", "z {count_total}\") probrallity = (count_total - count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti", "čas: \" + str(last) + \" \" * 20, end=\"\\r\" ) # Some", "default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát", "nelze pro daný argument načíst hodnotu z configu (protože hodnota z configu #", "schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for ukol in", "\"--no-init\", help=\"Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\",", "shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou", "0 ################################################## ##### FUNKCE ##### ################################################## def rich_print( *objects: Any, sep: str =", 
"bool | None = None, refresh: bool = False, **fields, ): self.progress.update( self.task_id,", "length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count = 1 for znamka", "logging.config import os import threading import time import traceback import warnings import webbrowser", "is not None: rich_print(message, end=\"\", color=color) inpt = input() else: inpt = input(message)", "import webbrowser from dataclasses import dataclass, field from datetime import datetime, timedelta from", "thread in threads: thread.join() print() autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní", "= from_config | parsed args = Args(**parsed) # Verbose: # 0 - Nic", "| None = None): if title is not None: print(title) if isinstance(obj, bakalariapi.Komens):", "Command_Export(file_name: str = \"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(),", "prodlouženo na \" + str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): #", "v pořádku. 
# Pozn.: Pokud někdo dumá nad tím, proč zde tedy není", "\" \" * 20, end=\"\\r\" ) # Some spaces to rewrite previous text...", "KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\") data", "1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <= 20:", "if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace není", "Pokud není tento argument přítomen, program se zeptá za běhu\", nargs=\"?\", default=None, )", "1 - Warning; Pouze BakalářiAPI # 2 - Info; Pouze BakalářiAPI # 3", "soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}' byla načtena\") def", "stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None", "s = os.stat(config_path) rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. 
%Y, %H:%M:%S')},", "== 0: print(\"Žádné schůzky v Lootingu, zkouším načíst ze serveru\") schuzky = fresh()", "last < current: print(\"\\n\") break last = current time.sleep(1) print( \"Sezení bylo prodlouženo,", "str, create_file: bool, mode: str = \"r+\") -> IO: \"\"\"Vrátí file handler na", "očividně i negativní if not is_before and delta >= timedelta(hours=-1): color = \"red\"", "and (not args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware", "shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers(", ") parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se má spustit\", # dest=\"test\", metavar=\"ID\",", "cs_timedelta, parseHTML from prompt_toolkit.input import create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import", "False): def fresh(): if api.is_partial_init: partial_init_notice() return [] output = [] with Progress()", "Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}' to show\") async def", "f, ensure_ascii=False) # Odstraníme data, která jsou případně po JSONu, co jsme teď", "if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version", "task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI,", "logging.getLevelName(logging.root.level), ) if args.verbose < 4: for logger in [ logging.getLogger(name) for name", "Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží v", "hotove = 0 nehotove = 0 for 
ukol in ukoly: if ukol.done: hotove", "= args.password try: rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\",", "Pokračování\"] if enter_pokracovani else [] for key in keys: if isinstance(key, tuple): if", "color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print( f\"Typ uživatele:", "False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. %m.')} - {ukol.content}\" )", "typ_mismatch += 1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len !=", "warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool: def partial_init_mode():", "integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... Actually je to web scraper, ale API", "= False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens]", "záznamů pro typ {typ_old}! Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1 for", "action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, )", "importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného", "typ_old != typ_new: print(f\"Neshodující se typy! 
Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1", "zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"],", "fresh() length = len(zpravy) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\")", ") first = True for komens in filter( lambda x: x.grade == \"?\",", "print(\"Načítání úkolů...\") try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH,", "task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask):", "short_help=\"Importuje data z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou)", "typů v datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\")", "obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting):", "- api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if", "import threading import time import traceback import warnings import webbrowser from dataclasses import", "je %.2f%%\" % probrallity) ################################################## ##### MAIN ##### ################################################## def main(): global api", "x.start_time and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first =", "zadáno, předpokládá se prázdné heslo\", color=\"yellow\" ) args.password = \"\" 
api.password = args.password", "API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") #", "\" + str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření", "try: with Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID", "znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m.", "le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již není", "inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna, nebude", "je {count_invalid} z {count_total}\") probrallity = (count_total - count_invalid) / count_total * 100", "parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze opakovat", "fresh(): if api.is_partial_init: partial_init_notice() return [] output = [] with Progress() as progress:", "key == \"o\": webbrowser.open(obj.join_url) elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\"))", "== 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count = 1 for", "ensure_ascii=False) # Odstraníme data, která jsou případně po JSONu, co jsme teď napsali", "end=\"\\r\" ) # Some spaces to rewrite previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current", "count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5)", "in \"nf0\": return False def dialog_cislo(text: str = \"\", default: int | None", "se passují zaznamenané klávesy. Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy. 
done: Funkce,", "Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if", "def Test4(): print(\"Tento test již není podporován... Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu:", "logging.info( \"Logging zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose <", "if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif key ==", "t)() rich_print( f\"Test {ID} skončil\" + (\"\" if o is None else \";", "API.GetHomeworksIDs() def Test4(): print(\"Tento test již není podporován... Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID", "f: # Indent, protože chci, aby to šlo přehledně upravit i z editoru", "datetime(9999, 12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online", "datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la =", "False): print(\"Načítání úkolů...\") try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else", "je v pořádku. 
# Pozn.: Pokud někdo dumá nad tím, proč zde tedy", "data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print( \"Server není dostupný; Uložená", "= len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved", "if ukol.done: hotove += 1 else: nehotove += 1 if hotove + nehotove", "def key_handler_proc(keys: list[KeyPress]): for key_press in keys: if done_on_enter and key_press.key == Keys.Enter:", "\"config show\") json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs):", "True for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first =", "inicializaci\") ) parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, )", "= 1 for zprava in zpravy: try: show(zprava, f\"*** Zpráva {count} z {length}", "0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count = 1 for schuzka", "nehotove = 0 for ukol in ukoly: if ukol.done: hotove += 1 else:", "se můžeme dotazovat (jen) přes `in` if not (\"disable_config\" in parsed): from_config =", "který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč,", "ukol.done: continue cls() show( ukol, f\"*** Domácí úkol {count} z {hotove + nehotove", "1 for ukol in ukoly: try: if not zobraz_hotove and ukol.done: continue cls()", "**fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields, ) def", "z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh:", "fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else 
bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly", "1 if hotove + nehotove == 0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return", "(count_total - count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" %", "TYPE_CHECKING: from . import shell else: try: from . import shell except ImportError:", "True for schuzka in filter( lambda x: today_aware < x.start_time and x.start_time <", "print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}\" +", "for znamka in filter( lambda x: (x.need_confirm and not x.confirmed) or min(lasttime, today", "id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1,", "pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná", "task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo", "None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\"", "úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo", "default input_letter = inpt[0].lower() if input_letter in \"aty1\": return True if input_letter in", "ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print( f\"Typ", "][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level),", "pro změnu nefugnuje při importu jako 
modul, jelikož v tom případě # hledá", "None, visible: bool | None = None, refresh: bool = False, **fields, ):", "Enter ukončí záznam kláves. Pozn.: Pokud True, tak se funkce v parametru handler", "\"\" if obj.done else \"green\"), \"N - Označí úkol jako nehotový\", \"Z -", "c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass elif", "probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\")", "nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument přítomen,", "parsed): from_config = load_args_from_config() if from_config is not None: parsed = from_config |", "total=0 ), ), ), ) thread.start() threads.append(thread) for thread in threads: thread.join() print()", "zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return offset = 0 cls() while offset", "Uložená data byla již importována, je tedy možné pracovat se starými daty\", color=\"yellow\",", "print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující", "{ukol.content}\" ) first = True for znamka in filter( lambda x: (x.need_confirm and", "= True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"): print(\"Generace JSON dat...\") with", "fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count = 1 for ukol in", "se odlášení sessionů a aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", )", "unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length, 
completed=length)", "def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else", "with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\") elif", "get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell", "| None = None verbose: int = 0 test: int | None =", "`VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly", "for key in keys: if isinstance(key, tuple): if key[1] == \"\": output.append(key[0]) else:", "které se načtou a tím i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens,", "to je # kvůli tomu, že ta zase pro změnu nefugnuje při importu", "from_config is not None: parsed = from_config | parsed args = Args(**parsed) #", "progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for", "Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new):", "logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None", "##### ################################################## def Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a", "zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento", "help=\"Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None,", "import cs_timedelta, parseHTML from 
prompt_toolkit.input import create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys", "print( f\"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old):", "shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True,", "připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní", "proběhne znovu...\" ) return Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode() return False", "cls() show( ukol, f\"*** Domácí úkol {count} z {hotove + nehotove if zobraz_hotove", "Info; Pouze BakalářiAPI # 3 - Debug; Pouze BakalářiAPI # 4 - Info", "print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True))", "= obj.start_time_delta color = \"\" # Delta totiž může být očividně i negativní", "test již není podporován... 
Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta:", "== Keys.ControlC: raise KeyboardInterrupt elif handler is not None: handler(key_press, done) with inpt.raw_mode():", "print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi", "elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before =", "| None = None username: str | None = None password: str |", "inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output", "31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek získany", "znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}\" +", "je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime", "spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", ) )", "typ {typ_old}! 
Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1 for id_old, obj_old,", "%s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4: for logger in [", "{count} z {hotove + nehotove if zobraz_hotove else nehotove} ***\", ) count +=", "if force_fresh: zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0:", "+ str(last) + \" (+ max 1s) do konce a bylo prodlouženo na", "else (' (Ano)' if default else ' (Ne)')}: \" while True: # ano/true/yes/1", "typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se typy! Old: '{typ_old}';", "program se zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud", "api.looting.get(bakalariapi.Grade) ): if first: first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta]", "tato flaga přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\",", "def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\") data = api.looting.export_data() new", "logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate = True selenium:", "color=color) inpt = input() else: inpt = input(message) if len(inpt) == 0: if", "print(\"Získávám IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999,", "Výsledek testu:\") ) if o is not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\",", "do dané funkce. Args: handler: Funkce do které se passují zaznamenané klávesy. 
Bere", "return cls() count = 1 for zprava in zpravy: try: show(zprava, f\"*** Zpráva", "parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí se", "v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if", "'{type(obj)}' to show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] | None,", "if offset >= length: break print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování stiskni", "# for key_press in keys: # if key_press.key == Keys.Escape: # raise SystemExit", "\"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut na", "args.username is None: try: args.username = input(\"Přihlašovací jméno: \") api.username = args.username except", "shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\",", "f\"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if", "zabezpečené připojení k serveru. 
Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl()", "přítomen, program se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\",", "= float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current: print(\"\\n\") break last", "spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, ) ) parser", "timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first: first = False print(\"Komens zprávy:\") rich_print(", "parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga přítomna, neprovede se odlášení sessionů a", "ze serveru\") zpravy = fresh() length = len(zpravy) if length == 0: print(\"Nebyly", "\"Současný zbývající čas: \" + str(last) + \" \" * 20, end=\"\\r\" )", "zmáčklé klávesy, které následně passuje do dané funkce. Args: handler: Funkce do které", "keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] | None, *, done_on_enter: bool = True,", "z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz", "break print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování stiskni klávasu... 
(Již zobrazeno {offset}", "in keys: if done_on_enter and key_press.key == Keys.Enter: done() # elif key_press.key ==", "True tasks: list[Task] = [ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky,", "api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current:", "progress, progress.add_task( task.description, start=task.start, total=0 ), ), ), ) thread.start() threads.append(thread) for thread", "show(obj: bakalariapi.objects.BakalariObject, title: str | None = None): if title is not None:", "None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str)", "list[Task] = [ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání", "dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return offset = 0 cls() while", "new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se typy! 
Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch", "dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která", "color=\"yellow\", ) def ask_import() -> bool: try: if args.no_import: if dialog_ano_ne( \"Server není", "mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler is not None: handler(key_press,", "= len(zpravy) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls()", "count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool = False):", "refresh: bool = False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible,", "python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh", "RichHandler from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax from", "při zavolání ukončí záznam kláves. Pokud je `None`, nic se nevolá. Hodnota `None`", "1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def", "přes `in` if not (\"disable_config\" in parsed): from_config = load_args_from_config() if from_config is", "jelikož shell se nachází v omezeném módu. 
Pro přepnutí do online módu můžete", "handler: Callable[[KeyPress, Callable[[], None]], None] | None, *, done_on_enter: bool = True, mask_keyboard_interrupt:", "start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved:", "from_date=None if lasttime is None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start()", "import RichHandler from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax", "print(\"\\n\") break last = current time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo \"", "visible: bool | None = None, refresh: bool = False, **fields, ): self.progress.update(", "not znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if", "dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) )", "tento argument přítomen, program se zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\",", "na \" + str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\"", "api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False if not args.no_init: successful_init", "# rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k serveru. 
Chcete", "nejspíše nikdy nedojde) # (a navíc alespoň nemusí řešit formátování při \"config show\")", "`cast()` protože jsem zatím nepřišel na způsob, jak dostat hint při patchování metod", "len(zpravy) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count", "4: for logger in [ logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"):", "print( \"Sezení bylo prodlouženo, když zbývalo \" + str(last) + \" (+ max", "bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove = 0 for ukol in ukoly:", "nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je", "output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str | None =", "* 30) print(f\"Počet typů v datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new):", "{ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\")", "count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool = False):", "as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka", "##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla", "ze dne {znamka.date1.strftime('%d. %m. 
%Y')}\" + (\"\" if note == \"\" else f\"", "modul, jelikož v tom případě # hledá modul `shell` jako \"globální\" modul (ne", "je \"disable_config\" # v \"parsed\" tak má hodnotu `True`, tudíž se můžeme dotazovat", "-> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] = [] with Progress()", "api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool", "default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje", "| None = None executable_path: str | None = None verbose: int =", "if force_fresh: schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0:", "Console(file=file) if color is not None: # Pravděpodobně někdy bude problém, že se", "# (a navíc alespoň nemusí řešit formátování při \"config show\") json.dump(args.__dict__, f, indent=4)", "= fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky v", "from . import shell else: try: from . import shell except ImportError: import", "\"přímo\" (resp. ne relativně), což už je v pořádku. # Pozn.: Pokud někdo", "file: IO[str] | None = None, flush: bool = False, color: str |", "bool | None = None, color: str | None = None ) ->", "timedelta(hours=-1): color = \"red\" elif is_before and delta <= timedelta(minutes=5): color = \"yellow\"", "= [] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False,", "get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané", "print(f\"Neshodující se typy! 
Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len =", "# v \"parsed\" tak má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes", "def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while True:", "count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) ##################################################", "hotove += 1 else: nehotove += 1 if hotove + nehotove == 0:", "\"parsed\" tak má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in` if", "first = False print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu", "získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\",", "print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while True: last =", "%m. 
%Y')}\" + (\"\" if note == \"\" else f\" - {note}\") )", "key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True))", "KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str", "len(inpt) == 0: if default is None: continue return default input_letter = inpt[0].lower()", "\"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\", )", "False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}'", "úkolů...\") try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True,", "elif is_before and delta <= timedelta(minutes=30): color = \"green\" print_keys( [(\"O - Otevře", "command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\",", "== \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci, lepší řešení pro", "= bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs) session.session.verify = False if", "není tento argument přítomen, program se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, )", "== 0: print(\"Žádné zprávy v Lootingu, zkouším načíst ze serveru\") zpravy = fresh()", "zavolání ukončí záznam kláves. Pokud je `None`, nic se nevolá. 
Hodnota `None` má", ") args.password = \"\" api.password = args.password try: rich_print( f\"Kontrola stavu serveru a", "color=\"yellow\" ) args.password = \"\" api.password = args.password try: rich_print( f\"Kontrola stavu serveru", "\"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\",", "args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return False if args.username", "namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path): with open(config_path, \"r\")", "typ_mismatch = 0 id_len_mismatch = 0 id_mismatch = 0 print(\"=\" * 30) print(f\"Počet", "serveru. Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo", "{count_total}\") probrallity = (count_total - count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti", "not os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"):", ").get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy", "True) print(\"Úkol označen jako hotový\") elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen", "= patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na `False` (v `requests` modulu),", "někdo dumá nad tím, proč zde tedy není jen druhá možnost, tak to", "přítomna, vynutí se získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser =", "schuzky = fresh() length = len(schuzky) if length == 0: print(\"Nebyly nalezeny žádné", "\") except KeyboardInterrupt: 
rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\" )", "args.password try: rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False,", "argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser", "zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False if args.password is None: try: args.password", ") if len(output) <= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se pokus", "\"check\", help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", )", "= len(IDs) print(f\"IDčka online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try:", "create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode,", "f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů:", "shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False, ) )", "to šlo přehledně upravit i z editoru (i když k tomu nejspíše nikdy", "bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si", "datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first = True for znamka in", "konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře", "None = None username: str | None = None password: str | 
None", "| None: global args with get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f) return", "\"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat, tudíž se brát v potaz pouze", "= f\"{text} Ano/Ne{'' if default is None else (' (Ano)' if default else", "= bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch = 0", "spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not in args.commands and (not", "{count_invalid} z {count_total}\") probrallity = (count_total - count_invalid) / count_total * 100 print(\"Pravděpodobnost", "to neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end,", "zatím to problém není, tak to neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\",", "help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None,", "= None, completed: float | None = None, advance: float | None =", "continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None =", "bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in", "= None, advance: float | None = None, description: str | None =", "for znamka in filter( lambda x: min(lasttime, today - timedelta(5)) < x.date1 and", "args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\",", "color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands", 
"plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import() ->", "key_press.key == Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC:", "f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která jsou případně po JSONu, co", "x.time, api.looting.get(bakalariapi.Komens), ): if first: first = False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva", "RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved =", "59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek získany ({la})\") print() error:", "print(\"Tento test již není podporován... Sadge\") return # homeworks = API.GetHomeworks() # print(\"Úkoly", "def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] = []", "color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while", "try: if args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce importovat uložená data?\", True,", "level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging", "\"init\".', color=\"yellow\", ) def dialog_ano_ne( text: str = \"\", default: bool | None", "= parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", )", "executable_path: str | None = None verbose: int = 0 test: int |", "progress: threads: list[threading.Thread] = [] for task in tasks: thread = threading.Thread( target=task.function,", "= Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), 
\"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj,", "HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if", "konce a bylo prodlouženo na \" + str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\")", "TYPE_CHECKING, Any, Callable, cast import bakalariapi import platformdirs import requests import rich from", "if lasttime is None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for", "| None = None if args.browser is not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()],", "rich_print( \"Server není dostupný; Uložená data byla již importována, je tedy možné pracovat", "passuje do dané funkce. Args: handler: Funkce do které se passují zaznamenané klávesy.", "f\"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)};", "uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else:", "None: parsed = from_config | parsed args = Args(**parsed) # Verbose: # 0", "color=\"yellow\", ) def Command_Komens(limit: int | None = None, force_fresh: bool = False):", "args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\",", "uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou", "+= 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool = False): print(\"Získávám", "ne relativně), což už je v pořádku. 
# Pozn.: Pokud někdo dumá nad", "args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna,", "if default is None else (' (Ano)' if default else ' (Ne)')}: \"", "při \"config show\") json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f: Callable): def patched(*args,", "neproběhla úspěšně a shell poběží v omezeném módu.\\nPro přepnutí do plného módu zkuste", "= len(studenti) print(f\"Studenti získáni, počet studentů je {length}\") try: count = dialog_cislo(\"Kolik zobrazit", "x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first: first = False print(\"Poslední", "try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return False except", "= 0 cls() while offset < length: try: for _ in range(count): if", "note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne", "default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí shellu (po autorunu);", "\"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args: url: str | None = None", "def save_config(): with get_io_file(CONFIG_FILE, True) as f: # Indent, protože chci, aby to", ") except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove = 0 for ukol", "shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\",", "získáni, počet studentů je {length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25)", "parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, # => 
case-insensitive help=\"Specifikuje", "key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject,", "zobraz_hotove else nehotove} ***\", ) count += 1 except KeyboardInterrupt: print(\"\\n\") break def", "__import__(__name__) t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o =", "len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved =", "řešení pro poslední řádku je: ``` await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event()", "is None: try: args.username = input(\"Přihlašovací jméno: \") api.username = args.username except KeyboardInterrupt:", "print(\"Žádné schůzky v Lootingu, zkouším načíst ze serveru\") schuzky = fresh() length =", "not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***\",", "for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy = fresh()", "f\"*** Zpráva {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\")", "offset >= length: break print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování stiskni klávasu...", "logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info(", "api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def", "handler: Funkce do které se passují zaznamenané klávesy. 
Bere 2 argumenty: key_press: Zaznamenaný", "testu, který se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\",", "async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] | None, *, done_on_enter: bool", "end: str = \"\\n\", file: IO[str] | None = None, flush: bool =", "kláves. Pozn.: Pokud True, tak se funkce v parametru handler nevolá. mask_keyboard_interrupt: Pokud", "{length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool", "= len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující se počet záznamů pro typ", "shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name, True)", "api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky v Lootingu, zkouším načíst ze serveru\")", "zkouším načíst ze serveru\") schuzky = fresh() length = len(schuzky) if length ==", "parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat, tudíž se brát v", "key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif key", "length: try: for _ in range(count): if offset >= length: break print(studenti[offset].format()) offset", "annotations import argparse import asyncio import getpass import inspect import json import logging", "while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný", "if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) )", "continue return default input_letter = inpt[0].lower() if input_letter in \"aty1\": return True if", "dialog_ano_ne( text: str = \"\", 
default: bool | None = None, color: str", "dne {znamka.date1.strftime('%d. %m. %Y')}\" + (\"\" if note == \"\" else f\" -", "not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\",", "# kvůli tomu, že ta zase pro změnu nefugnuje při importu jako modul,", "-> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží v omezeném", "Pokud je `None`, nic se nevolá. Hodnota `None` má smysl pouze pokud parametr", "= os.stat(config_path) rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost", "str = \"r+\") -> IO: \"\"\"Vrátí file handler na daný soubor `file` v", "or min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first: first =", "def Command_Schuzky(force_fresh: bool = False): def fresh(): if api.is_partial_init: partial_init_notice() return [] output", "with no # known parent package\") a `shell` se naimportuje \"přímo\" (resp. ne", "úspěšně a shell poběží v omezeném módu.\\nPro přepnutí do plného módu zkuste opětovat", "None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\")", "se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity = (count_total", "inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance", "zní líp :)\", ) if parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\",", "print( \"Současný zbývající čas: \" + str(last) + \" \" * 20, end=\"\\r\"", "Some spaces to rewrite previous text... 
session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get(", "argument načíst hodnotu z configu (protože hodnota z configu # se přepíše hodnotou", "Args(**parsed) # Verbose: # 0 - Nic # 1 - Warning; Pouze BakalářiAPI", "today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first: first = False print(\"Komens", "partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\\nPro přepnutí do", "offset += 1 input( f\"Pro pokračování stiskni klávasu... (Již zobrazeno {offset} výsledků z", "def Test3(): print(\"Tento test již není podporován... Sadge\") return # return API.GetHomeworksIDs() def", "note == \"\" else f\" - {note}\") ) first = True for komens", "aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command(", "list[KeyPress]): for key_press in keys: if done_on_enter and key_press.key == Keys.Enter: done() #", "negativní if not is_before and delta >= timedelta(hours=-1): color = \"red\" elif is_before", "se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo;", "teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší jak náš současný)", "Pokud True, tak se funkce v parametru handler nevolá. mask_keyboard_interrupt: Pokud `True`, tak", "obj.done else \"green\"), \"N - Označí úkol jako nehotový\", \"Z - Zobrazí HTML", "else ' (Ne)')}: \" while True: # ano/true/yes/1 / ne/false/no/0 if color is", "mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován. 
Pozn.:", "jako nehotový\") elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else:", "x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first: first = False print(\"Poslední známky:\") note", "with Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in", "prodlouženo, když zbývalo \" + str(last) + \" (+ max 1s) do konce", "počet studentů je {length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except", "+ ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else \"\" ) )", "předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a přihlašovací údaje jsou správné\",", "schůzky\") return cls() count = 1 for schuzka in schuzky: try: show(schuzka, f\"***", "= api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output", "import annotations import argparse import asyncio import getpass import inspect import json import", "verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan]", "metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument přítomen, program se zeptá za běhu\",", "jako \"globální\" modul (ne jako \"lokální\" ve složce), tudíž selže. 
if TYPE_CHECKING: from", "\".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None): if title is", "partial_init_mode() return False if args.password is None: try: args.password = getpass.getpass(\"Heslo: \") except", "help=\"Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)\",", "continue cls() show( ukol, f\"*** Domácí úkol {count} z {hotove + nehotove if", "\"\"\" evnt = asyncio.Event() inpt = create_input() done = lambda: evnt.set() def key_handler_proc(keys:", "os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y,", "unfinished_only=False, only_first_page=False, ) if len(output) <= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se", "tento argument přítomen, program se zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\",", "celkových {count_total}\") probrallity = (count_total - count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost", "type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\",", "schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12, 31, 23,", "rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as", "shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", ) ) parser = shell.ShellArgumentParser()", "= api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky v Lootingu, zkouším načíst ze", "unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str function: 
Callable[[bakalariapi.BakalariAPI, RichTask],", "serveru\", color=\"red\") partial_init_mode() return False if args.username is None: try: args.username = input(\"Přihlašovací", "input( f\"Pro pokračování stiskni klávasu... (Již zobrazeno {offset} výsledků z {length})\" ) cls()", "= False no_import: bool = False disable_config: bool = False commands: list[str] =", "def Command_Export(file_name: str = \"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name, True) as f:", "print(\"Žádná konfigurace není uložená\") elif cmd == \"open\": dirname = os.path.dirname(config_path) # =", "dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se má spustit\", #", "help=\"Pokud je tato flaga přítomna, nebude proveden import dat (z hlavního souboru)\", action=\"store_true\",", "pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H - Označí úkol jako", "< 4: for logger in [ logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if", "(x.need_confirm and not x.confirmed) or min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ):", "as tb_install from urllib3.exceptions import InsecureRequestWarning # Takový hack na to, aby `bakalarishell`", "/ count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) ################################################## #####", "count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output)", "shellu (po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty", "není dostupný; Chce importovat uložená data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode() else:", "with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) )", 
"id_len_mismatch = 0 id_mismatch = 0 print(\"=\" * 30) print(f\"Počet typů v datech", "informace o serveru\", ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je", "default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument přítomen, program", "else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY ##### ################################################## def", "Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt", "help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému", ") except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls()", "hotové úkoly?\") # cls() # for homework in homeworks: # if not zobrazHotove", "Příklad: ``` def handler(keys_press: KeyPress, done: Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler))", "= True for znamka in filter( lambda x: (x.need_confirm and not x.confirmed) or", "řádku je: ``` await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt = create_input()", "\"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\")", "str(last) + \" (+ max 1s) do konce a bylo prodlouženo na \"", "open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as f: # Indent, protože", "zatím nepřišel na způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u) session", "api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu 
[magenta]{ukol.subject}[/magenta] na", "parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command(", "= get_io_filepath(file) if not os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with", "bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID)", "True, mask_keyboard_interrupt: bool = False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy, které následně", "api.password = args.password try: rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů pro uživatele", "parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna, nebude proveden import dat (z hlavního", "ta zase pro změnu nefugnuje při importu jako modul, jelikož v tom případě", "1 else: nehotove += 1 if hotove + nehotove == 0: print(\"Nebyly nalezeny", "jako \"lokální\" ve složce), tudíž selže. if TYPE_CHECKING: from . import shell else:", "- Pokračování\"] if enter_pokracovani else [] for key in keys: if isinstance(key, tuple):", "new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch = 0 id_mismatch =", "if os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m.", "asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}' to show\") async def keyhandler( handler: Callable[[KeyPress,", "uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path): if not create_file: raise", "\"red\" elif is_before and delta <= timedelta(minutes=5): color = \"yellow\" elif is_before and", "done_on_enter: Pokud True, tak se při klávese Enter ukončí záznam kláves. 
Pozn.: Pokud", "zpráv\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in", "úkoly?\") count = 1 for ukol in ukoly: try: if not zobraz_hotove and", "show\") json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs): #", "print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování stiskni klávasu... (Již zobrazeno {offset} výsledků", "**kwargs, ): c = rich.get_console() if file is None else Console(file=file) if color", "ale zatím to problém není, tak to neřeším eShrug objects = tuple(map(lambda x:", "k serveru. Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print(", "načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") # cls() # for", "(old): {type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb", "= dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ##################################################", "today_aware < x.start_time and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first:", ") subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační", "Test6(): count_total = 0 count_invalid = 0 try: while True: count_total += 1", "objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice():", "= len(schuzky) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls()", "action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty pro argparse MUSÍ mít 
\"default=None\", jinak", "-> IO: \"\"\"Vrátí file handler na daný soubor `file` v uživatelské (data) složce.\"\"\"", "{old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1 for id_old, obj_old, id_new, obj_new in", "unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh: schuzky = fresh() else: schuzky =", "se nevykonalo, jelikož konfigurace není uložená\") elif cmd == \"check\": if os.path.exists(config_path): s", "= platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass", "if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky", "if len(schuzky) == 0: print(\"Žádné schůzky v Lootingu, zkouším načíst ze serveru\") schuzky", "data z daného souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\",", "obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old !=", "bool = False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output:", "print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first", "while offset < length: try: for _ in range(count): if offset >= length:", "False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce.", "Takový hack na to, aby `bakalarishell` šel spustit také přímo ze zdrojové složky", "{s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"open\": dirname =", ") parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které", "tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\",", "= task_id def start(self): self.progress.start_task(self.task_id) def update( self, total: float | None =", "color=\"red\") partial_init_mode() return False if args.username is None: try: args.username = input(\"Přihlašovací jméno:", "task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None else", "funkci, lepší řešení pro poslední řádku je: ``` await keyhandler(handler) ``` \"\"\" evnt", ") ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser =", "cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False): def fresh(): if", "configu # se přepíše hodnotou \"None\" z argparse) parsed = {k: v for", "case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke", "else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id)", "in zpravy: try: show(zprava, f\"*** Zpráva {count} z {length} 
***\") count += 1", "\"Nepodařilo se navázat zabezpečené připojení k serveru. Chcete pokračovat s nezabezpečeným připojením?\", False,", "None password: str | None = None browser: str | None = None", "(\"\" if o is None else \"; Výsledek testu:\") ) if o is", "json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano...", "- {ukol.content}\" ) first = True for znamka in filter( lambda x: (x.need_confirm", "range(count): if offset >= length: break print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování", "simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def handler(keys_press: KeyPress, done:", "Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci,", "propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází.", "`KeyboardInterrupt` bude potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován. 
Pozn.: Ve skutečnosti je `KeyboardInterrupt`", "zadané příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly", "( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else \"\" ) ) with", "- timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def", ") ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers", "session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný zbývající čas: \" +", "cmd == \"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else:", ") except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti získáni, počet studentů je", "není uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\":", "api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is not None: RunTest(args.test) prepare_shell() # Chceme", "získány informace o stavu serveru, ale žádné funkce by tímto neměli být ovlivněny\",", "ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\",", "(nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), )", "return # homeworks = 
API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit", "print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def", "Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init,", "min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first: first = False", "pass if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len(", "session.session.verify = False if login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch(", "print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except", "zdrojové složky # Pokud se `bakalarishell` spustí jako modul (= přes `import`), tak", "= None executable_path: str | None = None verbose: int = 0 test:", "již není podporován... 
Sadge\") return # homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\") #", "= asyncio.Event() inpt = create_input() done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for", "partial_init_notice() return length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count = 1", "False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def", "\"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}' to", "action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí shellu", "``` \"\"\" evnt = asyncio.Event() inpt = create_input() done = lambda: evnt.set() def", "== \"?\", api.looting.get(bakalariapi.Grade) ): if first: first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z", "nehotove} ***\", ) count += 1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool", "| str], enter_pokracovani=True): output = [\"Enter - Pokračování\"] if enter_pokracovani else [] for", "{'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není", "bool = False, force_fresh: bool = False): print(\"Načítání úkolů...\") try: if fast: ukoly", "z celkových {count_total}\") probrallity = (count_total - count_invalid) / count_total * 100 print(\"Konečná", "tak má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in` if not", "*objects: Any, sep: str = \" \", end: str = \"\\n\", file: IO[str]", "eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def", "známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z 
předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze", ") parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí", "\"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí", "filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first: first = False", "se ale spustí přes \"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" # je", "ukončí záznam kláves. Pozn.: Pokud True, tak se funkce v parametru handler nevolá.", "api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", )", "help=\"URL na bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá", "(max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude", "argumenty pro argparse MUSÍ mít \"default=None\", jinak se neprofiltrují # a nelze pro", "getattr(m, t)() rich_print( f\"Test {ID} skončil\" + (\"\" if o is None else", "{typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch += 1", "type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se načtou a tím i zrychlí", "progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved = api._parse(", "api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy v Lootingu, zkouším načíst ze serveru\")", "schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky", "from prompt_toolkit.input import 
create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys from", "enter_pokracovani else [] for key in keys: if isinstance(key, tuple): if key[1] ==", "action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna, nebude", "Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh", "first: first = False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M", "navíc alespoň nemusí řešit formátování při \"config show\") json.dump(args.__dict__, f, indent=4) def disable_ssl():", "##### FUNKCE ##### ################################################## def rich_print( *objects: Any, sep: str = \" \",", "id_len_mismatch += 1 for id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(),", "attempted relative import with no # known parent package\") a `shell` se naimportuje", "] ) def homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key ==", "z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh:", "= api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError:", "\"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\" ) args.password = \"\" api.password", "count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je", "pro argparse MUSÍ mít \"default=None\", jinak se neprofiltrují # a nelze pro daný", "má spustit\") shell_instance.add_command( shell.Command( 
\"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, ) )", "{count_invalid} z celkových {count_total}\") probrallity = (count_total - count_invalid) / count_total * 100", "\"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu...\"", "jméno: \") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode()", "print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output = [\"Enter - Pokračování\"] if enter_pokracovani", "(jen) přes `in` if not (\"disable_config\" in parsed): from_config = load_args_from_config() if from_config", "else \"green\"), \"N - Označí úkol jako nehotový\", \"Z - Zobrazí HTML úkolu\",", "\"o\": webbrowser.open(obj.join_url) elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) #", "print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count = 1 for zprava in", "homework in homeworks: # if not zobrazHotove and homework.Done: # continue # print(\"***", "shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je \" +", "raise KeyboardInterrupt elif handler is not None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda:", "try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print(", "metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True,", "editoru (i když k tomu nejspíše nikdy nedojde) # (a navíc alespoň nemusí", "except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if 
dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k", "nehotové {nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count =", "api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse(", "`KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def handler(keys_press: KeyPress,", "tak `KeyboardInterrupt` bude potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti je", "= None, force_fresh: bool = False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice()", "je {length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return", "import os import threading import time import traceback import warnings import webbrowser from", "{type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb u", "který se má spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud", "nic se nevolá. 
Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je `True`.", "větší 'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací", "not x.confirmed) or min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first:", "jak dostat hint při patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound", "else f\" - {note}\") ) first = True for komens in filter( lambda", "import IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi import platformdirs import requests import", "argument přítomen, program se zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\",", "len(schuzky) == 0: print(\"Žádné schůzky v Lootingu, zkouším načíst ze serveru\") schuzky =", "api, RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ), ), ), ) thread.start() threads.append(thread)", "\"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\",", "ne/false/no/0 if color is not None: rich_print(message, end=\"\", color=color) inpt = input() else:", "None, advance: float | None = None, description: str | None = None,", "requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k serveru.", "try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně", "f\"*** Známka {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\")", "task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None", "print(\"\\n\\n\") 
asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta", "in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se typy! Old: '{typ_old}'; New:", "parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se načtou a tím", "from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax from rich.traceback", "online módu můžete zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( text: str =", "f\"({default})\") while True: inpt = input() if not inpt: if default is None:", "True if input_letter in \"nf0\": return False def dialog_cislo(text: str = \"\", default:", "není podporován... Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"), True)", "= False if login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__", "in ukoly: try: if not zobraz_hotove and ukol.done: continue cls() show( ukol, f\"***", "len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID)", "na práci s uloženou konfigurací\", spread_arguments=False, ) ) if __name__ == \"__main__\": main()", "else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\"", "= field(default_factory=list) args: Args class RichTask: def __init__(self, progress: Progress, task_id: TaskID) ->", "input_letter in \"nf0\": return False def dialog_cislo(text: str = \"\", default: int |", "potvrzena\") 
asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\")", "flaga přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\",", "FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def", "None = None password: str | None = None browser: str | None", "`verify` na `False` (v `requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ##################################################", "successful_init: print(\"Vykonávám zadané příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané", "studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se má", "from __future__ import annotations import argparse import asyncio import getpass import inspect import", "1 for zprava in zpravy: try: show(zprava, f\"*** Zpráva {count} z {length} ***\")", "nevolá. Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je `True`. 
done_on_enter: Pokud", "načíst ze serveru\") schuzky = fresh() length = len(schuzky) if length == 0:", "brát v potaz pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument(", "`urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init()", "None, completed: float | None = None, advance: float | None = None,", "- Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done:", "not (\"disable_config\" in parsed): from_config = load_args_from_config() if from_config is not None: parsed", "[ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly),", "help=\"Přihlašovací heslo; Pokud není tento argument přítomen, program se zeptá za běhu\", dest=\"password\",", "NOSET if args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\",", "elif handler is not None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await", "def meeting_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url)", "rich_print( \"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***\", highlight=False, color=\"yellow\",", "nemusejí fungovat správně! 
***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int | None =", "parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument přítomen, program se zeptá za", ") parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\") shell_instance.add_command( shell.Command(", "souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu';", "bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count =", "znamka in filter( lambda x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade", "tím i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser,", "zobrazit již hotové úkoly?\") # cls() # for homework in homeworks: # if", "nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try:", "vykonat, jelikož shell se nachází v omezeném módu. 
Pro přepnutí do online módu", "json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs): # `cast()`", "| None, *, done_on_enter: bool = True, mask_keyboard_interrupt: bool = False, ): \"\"\"", "zobrazeno {offset} výsledků z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast:", "do souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name, False) as", "print(\"Žádné zprávy v Lootingu, zkouším načíst ze serveru\") zpravy = fresh() length =", "from_config | parsed args = Args(**parsed) # Verbose: # 0 - Nic #", "ServerInfo, short_help=\"Zobrazí informace o serveru\", ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\",", "f\"Neshodující se počet záznamů pro typ {typ_old}! Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch", "first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. 
%Y')}\"", ") ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\") shell_instance.add_command(", "data byla již importována, je tedy možné pracovat se starými daty\", color=\"yellow\", )", "RichTask], None] start: bool = True tasks: list[Task] = [ Task(\"Získání Komens zpráv\",", "přes \"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" # je složka), tak relativní", "== \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): #", "in schuzky: try: show(schuzka, f\"*** Schůzka {count} z {length} ***\") count += 1", "se brát v potaz pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, )", "web scraper, ale API zní líp :)\", ) if parser.prog == \"\": parser.prog", "in keys: if isinstance(key, tuple): if key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else:", "bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument(", "\"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) #", "komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\",", "prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int,", "description=description, visible=visible, refresh=refresh, **fields, ) def finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0", "-> bool: try: if args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce importovat uložená", "\" * 20, end=\"\\r\" ) # Some spaces to rewrite 
previous text... session.get(", "visible=visible, refresh=refresh, **fields, ) def finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0 ##################################################", "``` await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt = create_input() done =", "shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE", "except KeyboardInterrupt: pass finally: le = len(error) print( f\"Úspěšné pokusy: {la - le};", "argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False, ) ) if __name__ ==", "return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit", "zpravy = fresh() length = len(zpravy) if length == 0: print(\"Nebyly nalezeny žádné", "Pokud `False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z", "api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date", "break def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH", "= obj.is_before_start delta = obj.start_time_delta color = \"\" # Delta totiž může být", "\"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass", "evnt = asyncio.Event() inpt = create_input() done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]):", "**fields, ) def finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## ##### FUNKCE", "příkazové řádky\", action=\"store_true\", 
dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po", "z editoru (i když k tomu nejspíše nikdy nedojde) # (a navíc alespoň", "za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není", "Test3(): print(\"Tento test již není podporován... Sadge\") return # return API.GetHomeworksIDs() def Test4():", "+ \" \" * 20, end=\"\\r\" ) # Some spaces to rewrite previous", "!= id_new: print( f\"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID", "try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, )", "min(lasttime, today - timedelta(5)) < x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if", "args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = ( datetime.now() .astimezone()", "None): print(text, \"\" if default is None else f\"({default})\") while True: inpt =", "args.url = input(\"URL adresa serveru: \") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána", "not None: parsed = from_config | parsed args = Args(**parsed) # Verbose: #", "cls() def Test6(): count_total = 0 count_invalid = 0 try: while True: count_total", "souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser =", "def Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd ==", "cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo,", "zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy", "print(\"Tento test již není podporován... 
Sadge\") return # return API.GetHomeworksIDs() def Test4(): print(\"Tento", "cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try:", "(třeba pozůstatek po předchozím JSONu, pokud byl delší jak náš současný) f.truncate() print(f\"JSON", "při klávese Enter ukončí záznam kláves. Pozn.: Pokud True, tak se funkce v", "(%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4: for logger in [ logging.getLogger(name)", "print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress,", "{typ_old}! Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1 for id_old, obj_old, id_new,", "[] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0)", "online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as", "dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám", "konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY ##### ################################################## def RunTest(ID: int): m", "přítomna, neprovede se odlášení sessionů a aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True,", "def patch(f: Callable): def patched(*args, **kwargs): # `cast()` protože jsem zatím nepřišel na", "dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print(", "= fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy v", "unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) 
task.update(advance=1) return output if force_fresh: schuzky = fresh() else:", "{count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity = (count_total - count_invalid) /", "if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try: with", "or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}\"", "print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif", ". import shell else: try: from . import shell except ImportError: import shell", "Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty pro argparse MUSÍ", "do konce a bylo prodlouženo na \" + str(current) ) except KeyboardInterrupt: print(\"Testování", "None ) -> bool: message = f\"{text} Ano/Ne{'' if default is None else", "if old_id_len != new_id_len: print( f\"Neshodující se počet záznamů pro typ {typ_old}! Old:", "api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is", "\"globální\" modul (ne jako \"lokální\" ve složce), tudíž selže. 
if TYPE_CHECKING: from .", "bool = False commands: list[str] = field(default_factory=list) args: Args class RichTask: def __init__(self,", "5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat,", "return [] output = [] with Progress() as progress: task = RichTask( progress,", "= get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path): with open(config_path, \"r\") as f:", "task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id", "jméno\", color=\"red\") partial_init_mode() return False if args.password is None: try: args.password = getpass.getpass(\"Heslo:", "bylo prodlouženo, když zbývalo \" + str(last) + \" (+ max 1s) do", "getpass import inspect import json import logging import logging.config import os import threading", "načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již hotové", "args.verbose < 4: for logger in [ logging.getLogger(name) for name in logging.root.manager.loggerDict ]:", "short_help=\"Spustí daný test\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud", "pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se má", "pokud parametr `done_on_enter` je `True`. done_on_enter: Pokud True, tak se při klávese Enter", "else '[cyan]'+api.server_info.version_date.strftime('%d. %m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze", "ask_import() except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace", "\"nf0\": return False def dialog_cislo(text: str = \"\", default: int | None =", "= shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest,", "Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\"", "datetime import datetime, timedelta from typing import IO, TYPE_CHECKING, Any, Callable, cast import", "rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return False if args.username is None: try:", "bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False if not args.no_init: successful_init = Init()", "= RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID)", "if note == \"\" else f\" - {note}\") ) first = True for", "= shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga přítomna, neprovede se odlášení", "Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\",", "odlášení sessionů a aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command(", "if 
done_on_enter and key_press.key == Keys.Enter: done() # elif key_press.key == Keys.F4: #", "shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\",", "%d. %m. %Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else", "= True selenium: bakalariapi.SeleniumHandler | None = None if args.browser is not None:", "list[bakalariapi.Komens] = [] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\",", "(old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new", "return False rich_print(\"Server běží a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with", "None = None ) -> bool: message = f\"{text} Ano/Ne{'' if default is", "API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"), True) def Test5(): print(\"Tento test již není", ") def Test3(): print(\"Tento test již není podporován... 
Sadge\") return # return API.GetHomeworksIDs()", "e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass", "mask_keyboard_interrupt: bool = False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy, které následně passuje", "v Lootingu, zkouším načíst ze serveru\") zpravy = fresh() length = len(zpravy) if", "uložená\") elif cmd == \"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace je", "True for znamka in filter( lambda x: (x.need_confirm and not x.confirmed) or min(lasttime,", "bool = False): print(\"Načítání úkolů...\") try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if", "= locals() print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\"", "\"python bakalarishell\" (kde \"bakalarishell\" # je složka), tak relativní `import` selže (\"ImportError: attempted", "Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední řádku je: ``` await", "lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in keys: if done_on_enter and key_press.key", "if args.username is None: try: args.username = input(\"Přihlašovací jméno: \") api.username = args.username", "nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not in args.commands and (not args.no_import or", "= False, color: str | None = None, **kwargs, ): c = rich.get_console()", "try: show(zprava, f\"*** Zpráva {count} z {length} ***\") count += 1 cls() except", "help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command(", "= 0 test: int | None = None auto_run: bool = False no_init:", "\"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\",", "= datetime.max try: with 
get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError:", "create_input() done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in keys: if", "filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\") ukol._sort_by_date rich_print(", "= rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \",", "finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## ##### FUNKCE ##### ################################################## def", "+= 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False): def", "verbose: int = 0 test: int | None = None auto_run: bool =", "len(schuzky) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count", "previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if", "{znamka.time.strftime('%H:%M %d. %m. %Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed)", ") def dialog_ano_ne( text: str = \"\", default: bool | None = None,", "shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti,", "*, done_on_enter: bool = True, mask_keyboard_interrupt: bool = False, ): \"\"\" Začne zaznamenávat", "Funkce do které se passují zaznamenané klávesy. 
Bere 2 argumenty: key_press: Zaznamenaný stisk", "os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def save_config():", "epilog=\"Ano, ano, ano... Actually je to web scraper, ale API zní líp :)\",", "patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults()", "lambda x: (x.need_confirm and not x.confirmed) or min(lasttime, today - timedelta(5)) < x.time,", "\"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE,", "= threading.Thread( target=task.function, args=( api, RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ), ),", "else: output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None):", "api.is_partial_init: partial_init_notice() return [] output = [] with Progress() as progress: task =", "= znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d.", "None, refresh: bool = False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description,", "prompt_toolkit.keys import Keys from rich.console import Console from rich.logging import RichHandler from rich.progress", "Console from rich.logging import RichHandler from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from", "offset = 0 cls() while offset < length: try: for _ in range(count):", "= 0 id_len_mismatch = 0 id_mismatch = 0 print(\"=\" * 30) print(f\"Počet typů", "print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) 
################################################## ##### MAIN ##### ################################################## def", "count = 1 for zprava in zpravy: try: show(zprava, f\"*** Zpráva {count} z", "# a nelze pro daný argument načíst hodnotu z configu (protože hodnota z", ").get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] =", "import platformdirs import requests import rich from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input", "elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined", "os.stat(config_path) rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace", "today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first = True", "po předchozím JSONu, pokud byl delší jak náš současný) f.truncate() print(f\"JSON data vygenerována", "lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first: first = False print(\"Nadcházející", "spustí přes \"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" # je složka), tak", "completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields, ) def finish(self): task = self.progress.tasks[self.task_id] task.finished_time", "rich from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import create_input from prompt_toolkit.key_binding import", "hodnota\") def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output = [\"Enter - Pokračování\"]", "x: (x.need_confirm and not x.confirmed) or min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens),", "parser = shell.ShellArgumentParser() 
parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\") shell_instance.add_command( shell.Command( \"test\",", "souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\",", "skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def", "datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0)", "if first: first = False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z", ") ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command(", "default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument přítomen, program se", "(ne jako \"lokální\" ve složce), tudíž selže. if TYPE_CHECKING: from . 
import shell", "komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\")", "znamka in znamky: try: show(znamka, f\"*** Známka {count} z {length} ***\") count +=", "raise Exception(f\"Undefined type '{type(obj)}' to show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]],", "default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] | str],", "Ano/Ne{'' if default is None else (' (Ano)' if default else ' (Ne)')}:", "\"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data)", "parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument přítomen, program se zeptá", "`main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven -", "kláves. Pokud je `None`, nic se nevolá. 
Hodnota `None` má smysl pouze pokud", "# elif key_press.key == Keys.F4: # for key_press in keys: # if key_press.key", "# print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") # cls() def Test6():", "args with get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f) return parsed parser =", "a zapsána do souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name,", "# zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") # cls() # for homework", "api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59) )", "Any, Callable, cast import bakalariapi import platformdirs import requests import rich from bakalariapi.utils", "if first: first = False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d.", "nevolá. mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován.", "uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config,", "except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try: znamky", "default is None else f\"({default})\") while True: inpt = input() if not inpt:", "obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key == \"p\": print(\"Potvrzuji", "bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs) session.session.verify = False if login: session.login()", "is None else \"; Výsledek testu:\") ) if o is not None: rich_print(o)", "Pokud někdo dumá nad tím, proč zde tedy není jen druhá možnost, tak", "input(\"ID Studenta: \"), True) def Test5(): print(\"Tento test již není podporován... 
Sadge\") return", "start=task.start, total=0 ), ), ), ) thread.start() threads.append(thread) for thread in threads: thread.join()", "FileNotFoundError: pass if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length =", "byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if", "zobrazit již hotové úkoly?\") count = 1 for ukol in ukoly: try: if", "def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output = [\"Enter - Pokračování\"] if", "= \"red\" elif is_before and delta <= timedelta(minutes=5): color = \"yellow\" elif is_before", "== Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise", "{type(id_new)})\" ) id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" )", "<= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených", "jelikož konfigurace není uložená\") elif cmd == \"check\": if os.path.exists(config_path): s = os.stat(config_path)", "import Keys from rich.console import Console from rich.logging import RichHandler from rich.progress import", "old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující se", "shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command(", "bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh:", "f: api.looting.import_data(json.loads(f.read())) except 
FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\",", "True for komens in filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if", "podporován... Sadge\") return # return API.GetHomeworksIDs() def Test4(): print(\"Tento test již není podporován...", "lasttime is None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id", "příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, )", "def Test5(): print(\"Tento test již není podporován... Sadge\") return # homeworks = API.GetHomeworks()", "for _ in range(count): if offset >= length: break print(studenti[offset].format()) offset += 1", "print(\"\\r\", end=\"\") while True: print( \"Současný zbývající čas: \" + str(last) + \"", "sep: str = \" \", end: str = \"\\n\", file: IO[str] | None", "InsecureRequestWarning # Takový hack na to, aby `bakalarishell` šel spustit také přímo ze", "již hotové úkoly?\") count = 1 for ukol in ukoly: try: if not", "None, flush: bool = False, color: str | None = None, **kwargs, ):", "= False, force_fresh: bool = False): print(\"Načítání úkolů...\") try: if fast: ukoly =", "return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené", "with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) -> str: return", ") def Command_Komens(limit: int | None = None, force_fresh: bool = False): def", "\"\"\"Vrátí file handler na daný soubor `file` v uživatelské (data) složce.\"\"\" path =", "pokračování stiskni klávasu... 
(Již zobrazeno {offset} výsledků z {length})\" ) cls() except KeyboardInterrupt:", "None: try: args.url = input(\"URL adresa serveru: \") api.server_info.url = args.url except KeyboardInterrupt:", "importu jako modul, jelikož v tom případě # hledá modul `shell` jako \"globální\"", "není uložená\") elif cmd == \"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if", "\"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát\",", "\"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] = [] for task in", "try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID)", "Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException: return ask_import()", "dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument", "(\"\" if note == \"\" else f\" - {note}\") ) first = True", "if successful_init: print(\"Vykonávám zadané příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print(", ") parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument přítomen, program se zeptá", "dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count = 1 for ukol in ukoly: try:", "################################################## ##### TESTY ##### ################################################## def RunTest(ID: int): m = __import__(__name__) t =", "= \"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False)", "if o is not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", 
color=\"red\") traceback.print_exc() else:", "end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož shell se nachází", "short_help=\"Exportuje data z daného souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\",", ") shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True, )", "KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try: znamky =", "else nehotove} ***\", ) count += 1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice:", "argument přítomen, program se zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací", "bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh:", "módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True,", "modul (= přes `import`), tak vše proběhne v pořádku # Pokud se ale", "studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return", "global shell_instance predefined_commands = [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals()", "hotove + nehotove == 0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny", ") ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí 
známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command(", "def Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except", "parsed = from_config | parsed args = Args(**parsed) # Verbose: # 0 -", "elif cmd == \"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace je uložená", "shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser,", "RunTest(ID: int): m = __import__(__name__) t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test", "keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt = create_input() done = lambda: evnt.set()", "1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False): def fresh():", "číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity = (count_total - count_invalid)", "parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší", "getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\"", ").get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description:", "se načtou a tím i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí", "key = key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\")", "Args class RichTask: def __init__(self, progress: Progress, task_id: TaskID) -> None: self.progress =", "try: with get_io_file(file_name, False) as f: 
api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla načtena,", "| None = None, advance: float | None = None, description: str |", "módu.\\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", )", "klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. %Y')}\" ) first = True", "Lze opakovat vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\",", "se vše převádí na string, ale zatím to problém není, tak to neřeším", "nepřišel na způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u) session =", "print( f\"Neshodující se počet záznamů pro typ {typ_old}! Old: {old_id_len}; New: {new_id_len}\" )", "\"-d\", \"--disable-config\", help=\"Soubor s konfigurací se bude ignorovat, tudíž se brát v potaz", "), ), ) thread.start() threads.append(thread) for thread in threads: thread.join() print() autorun() else:", "get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která jsou případně", ".replace(hour=0, minute=0, second=0, microsecond=0) ) first = True for znamka in filter( lambda", "úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte", "dumá nad tím, proč zde tedy není jen druhá možnost, tak to je", "unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy = fresh() else: zpravy =", "for zprava in zpravy: try: show(zprava, f\"*** Zpráva {count} z {length} ***\") count", "i po filtrování je \"disable_config\" # v \"parsed\" tak má hodnotu `True`, tudíž", "parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se má spustit\", # dest=\"test\", metavar=\"ID\", default=None,", "logger.propagate = False # 
logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None = None", "schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun(): with", "True: print( \"Současný zbývající čas: \" + str(last) + \" \" * 20,", "api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\"", "Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun():", "in \"aty1\": return True if input_letter in \"nf0\": return False def dialog_cislo(text: str", "def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if", "= [] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False,", "funkce by tímto neměli být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return", "\"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje", "opakovat vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\",", "nevykonalo, jelikož konfigurace není uložená\") elif cmd == \"check\": if os.path.exists(config_path): s =", "proč zde tedy není jen druhá možnost, tak to je # kvůli tomu,", "): if id_old != id_new: print( f\"Neshodující se ID! 
Old: '{id_old}'; New: '{id_new}'", "Lootingu, zkouším načíst ze serveru\") zpravy = fresh() length = len(zpravy) if length", "{length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return offset", "try: o = getattr(m, t)() rich_print( f\"Test {ID} skončil\" + (\"\" if o", "# Some spaces to rewrite previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float(", "po zapnutí shellu (po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) #", "field from datetime import datetime, timedelta from typing import IO, TYPE_CHECKING, Any, Callable,", "tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto", "mode: str = \"r+\") -> IO: \"\"\"Vrátí file handler na daný soubor `file`", "done: Callable): key = key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif key ==", "\"\" api.password = args.password try: rich_print( f\"Kontrola stavu serveru a přihlašovacích údajů pro", "None, force_fresh: bool = False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return", "zobraz_hotove and ukol.done: continue cls() show( ukol, f\"*** Domácí úkol {count} z {hotove", "'[cyan]'+api.server_info.version_date.strftime('%d. %m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů:", "color: str | None = None ) -> bool: message = f\"{text} Ano/Ne{''", "zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy v Lootingu, zkouším načíst", "v is not None} # Jelikož hodnoty filtrujeme, tak pokud i po filtrování", "zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se", "= False): print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH", "+ nehotove if zobraz_hotove else nehotove} ***\", ) count += 1 except KeyboardInterrupt:", "přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud", "\"-f\", \"--force\", help=\"Pokud je tato flaga přítomna, neprovede se odlášení sessionů a aplikace", "print_keys( [(\"O - Otevře schůzku v prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"]", "locals() print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if", "color=\"yellow\", ) if \"exit\" not in args.commands and (not args.no_import or args.auto_run): print()", "new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující se počet záznamů pro", "0 cls() while offset < length: try: for _ in range(count): if offset", "`file` v uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path): if not", "{len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new:", "def load_args_from_config() -> dict | None: global args with get_io_file(CONFIG_FILE, 
True) as f:", "# 0 - Nic # 1 - Warning; Pouze BakalářiAPI # 2 -", "zaznamenané klávesy. Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy. done: Funkce, která při", "+= 1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch,", "KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands = [x for x in shell.ShellPredefinedCommands]", "count = 1 for znamka in znamky: try: show(znamka, f\"*** Známka {count} z", "for k, v in vars(parser.parse_args()).items() if v is not None} # Jelikož hodnoty", "False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode()", "bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\"", "import requests import rich from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import create_input", "Funkce, která při zavolání ukončí záznam kláves. 
Pokud je `None`, nic se nevolá.", "= False # logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None = None if", ") thread.start() threads.append(thread) for thread in threads: thread.join() print() autorun() else: rich_print( \"Autorun", "is None: continue return default input_letter = inpt[0].lower() if input_letter in \"aty1\": return", "o is not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test", "\"Nebyly získány informace o stavu serveru, ale žádné funkce by tímto neměli být", "= False x = f(*bound.args, **bound.kwargs) session.session.verify = False if login: session.login() return", "ukoly: if ukol.done: hotove += 1 else: nehotove += 1 if hotove +", "progress.add_task( task.description, start=task.start, total=0 ), ), ), ) thread.start() threads.append(thread) for thread in", "\"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí se získání dat ze serveru\", default=False,", "shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export,", "vars(parser.parse_args()).items() if v is not None} # Jelikož hodnoty filtrujeme, tak pokud i", "requests import rich from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import create_input from", "args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce importovat uložená data?\", True, \"yellow\", ):", "| None = None, description: str | None = None, visible: bool |", "global api global args def load_args_from_config() -> dict | None: global args with", "path = get_io_filepath(file) if not os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True)", "\"--test\", type=int, help=\"Test, který se má spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) 
parser.add_argument(", "end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je", "dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is", "flaga přítomna, nebude proveden import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, )", "v prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done:", "except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla", "rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace je", "print() autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if", "obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol", "jelikož neexistuje\") ################################################## ##### TESTY ##### ################################################## def RunTest(ID: int): m = __import__(__name__)", "color: str | None = None, **kwargs, ): c = rich.get_console() if file", "test\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato", "nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data", "parent package\") a `shell` se naimportuje \"přímo\" (resp. 
ne relativně), což už je", "float | None = None, completed: float | None = None, advance: float", "= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0,", "= None verbose: int = 0 test: int | None = None auto_run:", "bakalariapi.objects.BakalariObject, title: str | None = None): if title is not None: print(title)", "known parent package\") a `shell` se naimportuje \"přímo\" (resp. ne relativně), což už", "zadána adresa serveru\", color=\"red\") partial_init_mode() return False if args.username is None: try: args.username", "ale spustí přes \"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" # je složka),", "\"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu...\" ) return Init() else: return", "neexistuje\") ################################################## ##### TESTY ##### ################################################## def RunTest(ID: int): m = __import__(__name__) t", "schuzka in filter( lambda x: today_aware < x.start_time and x.start_time < today_aware +", "Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy. done: Funkce, která při zavolání ukončí", "**bound.kwargs) session.session.verify = False if login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ =", "podporován... 
Sadge\") return # homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove =", "hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném", "automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga", "\"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\",", "-> dict | None: global args with get_io_file(CONFIG_FILE, True) as f: parsed =", "partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož shell se nachází v omezeném módu.", "output.append(key) rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None): if", "color), \"Z - Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable): key", "serveru, ale žádné funkce by tímto neměli být ovlivněny\", color=\"yellow\", ) return True", "počet zpráv, které se načtou a tím i zrychlí proces\", ) shell_instance.add_command( shell.Command(", "je složka), tak relativní `import` selže (\"ImportError: attempted relative import with no #", "except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\")", "rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H - Označí úkol jako hotový\", \"\" if obj.done", "řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí", "login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když", "global args with get_io_file(CONFIG_FILE, True) as f: parsed = json.load(f) return parsed parser", "if 
input_letter in \"nf0\": return False def dialog_cislo(text: str = \"\", default: int", "if enter_pokracovani else [] for key in keys: if isinstance(key, tuple): if key[1]", "podporován... Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"), True) def", "od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm", "webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\",", "bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is None", "import getpass import inspect import json import logging import logging.config import os import", "= False if not args.no_init: successful_init = Init() if not args.no_import: try: with", "= [ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\",", "Pokud není tento argument přítomen, program se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None,", "color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme", "print(\"Vykonávám zadané příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy", "vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch = 0 id_mismatch = 0 print(\"=\"", "len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count = 1 for znamka in znamky:", "\"?\", api.looting.get(bakalariapi.Grade) ): if first: first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu", "pozůstatek po předchozím JSONu, pokud byl delší jak náš současný) f.truncate() print(f\"JSON data", "print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte 
zobrazit již hotové úkoly?\") # cls() #", "= bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init =", "return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož", "done: Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní", "threads: thread.join() print() autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\",", "ask_import() -> bool: try: if args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce importovat", "# for homework in homeworks: # if not zobrazHotove and homework.Done: # continue", "\"-s\", \"--slow\", help=\"Pokud je tato flaga přítomna, úkoly budou získány v 'pomalém módu'\",", "bool = False no_import: bool = False disable_config: bool = False commands: list[str]", "shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True, ) )", ") parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný", "BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato", "4 - Info # 5 - NOSET if args.verbose != 0: logging.basicConfig( level=[", "str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True tasks: list[Task] = [", "nefugnuje při importu jako modul, jelikož v tom případě # hledá modul `shell`", "= bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False if not args.no_init: successful_init =", "unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) 
).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return", "uložena\") elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic", "(\"disable_config\" in parsed): from_config = load_args_from_config() if from_config is not None: parsed =", "for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online", "Porovnávání typ_mismatch = 0 id_len_mismatch = 0 id_mismatch = 0 print(\"=\" * 30)", "dest=\"commands\", default=None, ) # Všechny argumenty pro argparse MUSÍ mít \"default=None\", jinak se", "@dataclass class Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True", "případě # hledá modul `shell` jako \"globální\" modul (ne jako \"lokální\" ve složce),", "Callable, cast import bakalariapi import platformdirs import requests import rich from bakalariapi.utils import", "# input(\"Pro pokračování stiskni klávasu...\") # cls() def Test6(): count_total = 0 count_invalid", "cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path)", "\"None\" z argparse) parsed = {k: v for k, v in vars(parser.parse_args()).items() if", "autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread]", "hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in` if not (\"disable_config\" in", "task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( 
bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved))", "daného souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\",", "program se zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for", "in [ logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate =", "minute=0, second=0, microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) )", "in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE, False)", "for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate", "= tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print(", "ImportError: import shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs =", "if zobraz_hotove else nehotove} ***\", ) count += 1 except KeyboardInterrupt: print(\"\\n\") break", "is not None: RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS", "ServerInfo() return True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type", "if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará", "def handler(keys_press: KeyPress, done: Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo,", "která jsou případně po JSONu, co jsme teď napsali (třeba pozůstatek po předchozím", "- verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\"", "# ano/true/yes/1 / ne/false/no/0 if color is not None: rich_print(message, end=\"\", color=color) inpt", "jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší jak náš", "json import logging import logging.config import os import threading import time import traceback", "int | None = None auto_run: bool = False no_init: bool = False", "+ str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie", "@dataclass class Args: url: str | None = None username: str | None", "probrallity = (count_total - count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\"", "def Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží", "\"json\")) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace uložena\")", "1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\")", ") except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze", "False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení", "string, ale zatím to problém není, tak to neřeším eShrug objects = tuple(map(lambda", ") def meeting_key_handler(key_press: KeyPress, done: Callable): key 
= key_press.key.lower() if key == \"o\":", "`KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu", "%d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" )", "as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané příkazy...\") for", "with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] =", "tomu nejspíše nikdy nedojde) # (a navíc alespoň nemusí řešit formátování při \"config", "completed: float | None = None, advance: float | None = None, description:", "== \"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze", "done: Callable): key = key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen", "api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if", "- Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower()", "= None browser: str | None = None executable_path: str | None =", "else: partial_init_mode() else: rich_print( \"Server není dostupný; Uložená data byla již importována, je", "completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start()", 
"Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"): print(\"Generace JSON", "%Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first =", "'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\",", "(v `requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE #####", "\"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver", "unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades())", "def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\\nPro přepnutí", "None: # Pravděpodobně někdy bude problém, že se vše převádí na string, ale", "bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if", "# print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") # cls()", "if key_press.key == Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt and key_press.key ==", "0 id_len_mismatch = 0 id_mismatch = 0 print(\"=\" * 30) print(f\"Počet typů v", "= shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí se získání", "= shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", 
parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí", "ano... Actually je to web scraper, ale API zní líp :)\", ) if", "(= přes `import`), tak vše proběhne v pořádku # Pokud se ale spustí", "\"\" # Delta totiž může být očividně i negativní if not is_before and", "try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice()", "se nevolá. Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je `True`. done_on_enter:", "length = len(studenti) print(f\"Studenti získáni, počet studentů je {length}\") try: count = dialog_cislo(\"Kolik", "= True for komens in filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ):", "bool = False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh,", "Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower()", "try: args.password = getpass.getpass(\"Heslo: \") except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá se", "print(\"\\n\") break def Command_Ukoly(fast: bool = False, force_fresh: bool = False): print(\"Načítání úkolů...\")", "is None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in", "API zní líp :)\", ) if parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument(", "pro prohlížeč, který je specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\",", "progress: Progress, task_id: TaskID) -> None: self.progress = progress self.task_id = task_id def", "sessionů a aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command(", "zapnutí shellu 
(po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny", "False rich_print(\"Server běží a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings():", "isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta color = \"\"", "in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy = fresh() else: zpravy", "neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with", "schůzky\") return cls() count = 1 for zprava in zpravy: try: show(zprava, f\"***", ") subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", )", "None else f\"({default})\") while True: inpt = input() if not inpt: if default", "Command_Komens(limit: int | None = None, force_fresh: bool = False): def fresh() ->", "only_first_page=False, ) if len(output) <= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se", "update( self, total: float | None = None, completed: float | None =", "Sadge\") return # homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte", "Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True tasks: list[Task] = [ Task(\"Získání Komens", "platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class", "\"--no-import\", help=\"Pokud je tato flaga přítomna, nebude proveden import dat (z hlavního souboru)\",", "None if args.browser is not None: selenium = bakalariapi.SeleniumHandler( 
bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api", "\"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False, ) ) if", "\") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return", "): self.progress.update( self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields, ) def finish(self):", "%.2f%%\" % probrallity) ################################################## ##### MAIN ##### ################################################## def main(): global api global", "daný argument načíst hodnotu z configu (protože hodnota z configu # se přepíše", "Test2(): print(\"Získávám IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1),", "a bylo prodlouženo na \" + str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def", "default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\",", "f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. 
%Y')}\" + (\"\" if", "locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven - verze", "is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is None) and not", "100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) ################################################## ##### MAIN ##### ##################################################", "RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ), ), ), ) thread.start() threads.append(thread) for", "**kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs) session.session.verify", "označen jako hotový\") elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\")", "pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené", ") logging.info( \"Logging zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose", "in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\") ukol._sort_by_date", "args.username, args.password, selenium) successful_init = False if not args.no_init: successful_init = Init() if", "{ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le =", "list[threading.Thread] = [] for task in tasks: thread = threading.Thread( target=task.function, args=( api,", "0 nehotove = 0 for ukol in ukoly: if ukol.done: hotove += 1", "True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"): print(\"Generace JSON dat...\") with get_io_file(file_name,", "== \"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print( 
f\"Konfigurace je uložená z data", "[] output: list[bakalariapi.Komens] = [] with Progress() as progress: task = RichTask( progress,", ") as session: try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\")", "\"url\", help=\"URL na bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument přítomen, program se", "{offset} výsledků z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool", ") first = True for znamka in filter( lambda x: min(lasttime, today -", "help=\"Pokud je tato flaga přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, )", "Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již není podporován... Sadge\") return #", "\"\"\" Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce. Args: handler:", "if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade):", "print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count = 1 for schuzka in", "evnt.wait() def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file:", "'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor s konfigurací se", "a shell poběží v omezeném módu.\\nPro přepnutí do plného módu zkuste opětovat inicializaci", "konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\",", "relativní `import` selže (\"ImportError: attempted relative import with no # known parent package\")", "již importována, je tedy možné pracovat se starými daty\", color=\"yellow\", ) partial_init_mode() except", "se 
zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není", "shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\",", "Chceme `main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print( f\"Bakalarishell připraven", "if o is None else \"; Výsledek testu:\") ) if o is not", "přes `import`), tak vše proběhne v pořádku # Pokud se ale spustí přes", "in homeworks: # if not zobrazHotove and homework.Done: # continue # print(\"*** Domácí", "def homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key == \"h\": obj.mark_as_done(api,", "sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož shell se", "datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old,", "\"\\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\\nPro přepnutí do plného módu", "IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12,", "spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) )", "- count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity)", "task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved:", "prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys from rich.console import Console from rich.logging", "KeyPress from prompt_toolkit.keys import Keys from rich.console import Console 
from rich.logging import RichHandler", "not is_before and delta >= timedelta(hours=-1): color = \"red\" elif is_before and delta", "datefmt=\"[%X]\", handlers=[RichHandler()], ) logging.info( \"Logging zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), )", "continue # print(\"*** Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování", "and not znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat())", "title: str | None = None): if title is not None: print(title) if", "přihlašovací jméno\", color=\"red\") partial_init_mode() return False if args.password is None: try: args.password =", "print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press:", "float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current: print(\"\\n\") break last =", "subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser(", "partial_init_mode() return False rich_print(\"Server běží a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try:", "po JSONu, co jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl", "typ_new: print(f\"Neshodující se typy! 
Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len", "= 0 count_invalid = 0 try: while True: count_total += 1 output =", "shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False, )", "if (znamka.need_confirm and not znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE, True) as", "Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved", "else: nehotove += 1 if hotove + nehotove == 0: print(\"Nebyly nalezeny žádné", "partial_init_notice() return [] output: list[bakalariapi.Komens] = [] with Progress() as progress: task =", "os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass", "= key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol označen jako hotový\") elif", "Lootingu, zkouším načíst ze serveru\") schuzky = fresh() length = len(schuzky) if length", "not None: # Pravděpodobně někdy bude problém, že se vše převádí na string,", "\" (+ max 1s) do konce a bylo prodlouženo na \" + str(current)", "x.confirmed) or min(lasttime, today - timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first: first", "disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs): # `cast()` protože jsem zatím nepřišel", "+= 1 else: nehotove += 1 if hotove + nehotove == 0: print(\"Nebyly", "připojení, inicializace nyní proběhne znovu...\" ) return Init() else: return ask_import() except KeyboardInterrupt:", "type (old): {type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch += 1 print( f\"Porovnávání", "# 3 - Debug; Pouze BakalářiAPI # 4 - Info # 5 -", "| None = None, force_fresh: bool = False): def fresh() -> list[bakalariapi.Komens]: if", "self.progress.tasks[self.task_id] task.finished_time = 0 
################################################## ##### FUNKCE ##### ################################################## def rich_print( *objects: Any,", "%H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\") elif cmd", "parsed = {k: v for k, v in vars(parser.parse_args()).items() if v is not", "a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not api.is_login_valid(): rich_print(\"Přihlašovací", "`SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace o", "**kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož shell se nachází v", "None} # Jelikož hodnoty filtrujeme, tak pokud i po filtrování je \"disable_config\" #", "nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se načtou a tím i zrychlí proces\",", "help=\"Pokud je tato flaga přítomna, úkoly budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\",", "adresa serveru: \") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\")", "key = key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif key == \"z\": c", "if id_old != id_new: print( f\"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ:", "argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný", "shell se nachází v omezeném módu. 
Pro přepnutí do online módu můžete zkusit", "if args.verbose < 4: for logger in [ logging.getLogger(name) for name in logging.root.manager.loggerDict", "in range(count): if offset >= length: break print(studenti[offset].format()) offset += 1 input( f\"Pro", "color=\"red\") partial_init_mode() return False if args.password is None: try: args.password = getpass.getpass(\"Heslo: \")", "= rich.get_console() if file is None else Console(file=file) if color is not None:", "bude potlačen. Pokud `False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován,", "commands: list[str] = field(default_factory=list) args: Args class RichTask: def __init__(self, progress: Progress, task_id:", "souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"): try: with get_io_file(file_name, False) as f:", "try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return offset =", "if default is None else f\"({default})\") while True: inpt = input() if not", "metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží", "if not (\"disable_config\" in parsed): from_config = load_args_from_config() if from_config is not None:", "bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"): print(\"Generace JSON dat...\")", "= {k: v for k, v in vars(parser.parse_args()).items() if v is not None}", "předchozím JSONu, pokud byl delší jak náš současný) f.truncate() print(f\"JSON data vygenerována a", "spustí jako modul (= přes `import`), tak vše proběhne v pořádku # Pokud", "int = 0 test: int | None = None auto_run: bool = False", "str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool, mode: str = \"r+\")", "), ) thread.start() threads.append(thread) for thread in threads: thread.join() 
print() autorun() else: rich_print(", "(opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser,", "+ f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max", "shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\", spread_arguments=True, ) ) parser", "parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\",", "zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce. Args: handler: Funkce do", "f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if successful_init: def task_ukoly(api:", "import inspect import json import logging import logging.config import os import threading import", "otevřít konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY ##### ################################################## def RunTest(ID: int):", "= True, mask_keyboard_interrupt: bool = False, ): \"\"\" Začne zaznamenávat zmáčklé klávesy, které", "| None = None, refresh: bool = False, **fields, ): self.progress.update( self.task_id, total=total,", "in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga", "inpt[0].lower() if input_letter in \"aty1\": return True if input_letter in \"nf0\": return False", "== \"o\": webbrowser.open(obj.join_url) elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler))", "help=\"Pokud je tato flaga přítomna, vynutí se 
získání dat ze serveru\", default=False, action=\"store_true\",", "] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress:", "= key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif key == \"z\": c =", ") partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if args.url is None: try: args.url", "None = None, force_fresh: bool = False): def fresh() -> list[bakalariapi.Komens]: if api.is_partial_init:", "else: try: from . import shell except ImportError: import shell tb_install(show_locals=True) cls =", "nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove =", "help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser, short_help=\"Importuje data z", "elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler is not", "FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data", "% probrallity) ################################################## ##### MAIN ##### ################################################## def main(): global api global args", "if v is not None} # Jelikož hodnoty filtrujeme, tak pokud i po", "parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga přítomna, neprovede se", "parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command(", "{'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", 
highlight=False, ) if not", "nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le = len(error) print(", "spaces to rewrite previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO)", "__future__ import annotations import argparse import asyncio import getpass import inspect import json", "ano, ano... Actually je to web scraper, ale API zní líp :)\", )", "False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is not None: RunTest(args.test)", "cls() # for homework in homeworks: # if not zobrazHotove and homework.Done: #", "task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def", "údaje o uložené konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command(", "args def load_args_from_config() -> dict | None: global args with get_io_file(CONFIG_FILE, True) as", "if title is not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P -", "\"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\",", "= 0 ################################################## ##### FUNKCE ##### ################################################## def rich_print( *objects: Any, sep: str", "else: print(\"Nic se nevykonalo, jelikož konfigurace není uložená\") elif cmd == \"check\": if", "Pouze BakalářiAPI # 3 - Debug; Pouze BakalářiAPI # 4 - Info #", "globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI 
Shell[/bright_green][yellow]>[/yellow]\",", "Všechny argumenty pro argparse MUSÍ mít \"default=None\", jinak se neprofiltrují # a nelze", "rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False if args.password is None: try:", "`True`, tudíž se můžeme dotazovat (jen) přes `in` if not (\"disable_config\" in parsed):", "print(\"Úkol označen jako nehotový\") elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\"))", "today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first = False print(\"Dnešní a zítřejší", "Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False, ) ) if __name__", "\"), True) def Test5(): print(\"Tento test již není podporován... Sadge\") return # homeworks", "default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) )", "pro typ {typ_old}! Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1 for id_old,", "nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global", "= 0 for ukol in ukoly: if ukol.done: hotove += 1 else: nehotove", "if not zobrazHotove and homework.Done: # continue # print(\"*** Domácí úkol ***\") #", "12, 31, 23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek", "key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif key == \"z\":", "in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None]", "successful_init = False if not args.no_init: successful_init = Init() if not args.no_import: try:", "\"lokální\" ve složce), tudíž selže. 
if TYPE_CHECKING: from . import shell else: try:", "[] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0)", "= shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato flaga přítomna, úkoly budou získány", "= os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku,", "if len(zpravy) == 0: print(\"Žádné zprávy v Lootingu, zkouším načíst ze serveru\") zpravy", "nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument přítomen, program", "api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0 id_len_mismatch", "klávese Enter ukončí záznam kláves. Pozn.: Pokud True, tak se funkce v parametru", "if key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def show(obj:", "datetime, timedelta from typing import IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi import", "přítomen, program se zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno;", "except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False if args.password is", "): if first: first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na", "KeyboardInterrupt elif handler is not None: handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())):", "Pozn.: Pokud někdo dumá nad tím, proč zde tedy není jen druhá možnost,", "(+ max 1s) do konce a bylo prodlouženo na \" + str(current) )", "k 
dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date),", "if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\", encoding=\"utf-8\"): pass return", "< today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first = False print(\"Dnešní a", "print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner", "return output if force_fresh: zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy)", "zobrazuji...\") cls() count = 1 for znamka in znamky: try: show(znamka, f\"*** Známka", "break def Command_Schuzky(force_fresh: bool = False): def fresh(): if api.is_partial_init: partial_init_notice() return []", ") def homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key == \"h\":", "def dialog_ano_ne( text: str = \"\", default: bool | None = None, color:", "se zabezpečené připojení, inicializace nyní proběhne znovu...\" ) return Init() else: return ask_import()", "3 - Debug; Pouze BakalářiAPI # 4 - Info # 5 - NOSET", "neprovede se odlášení sessionů a aplikace se tedy rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\",", "def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length)", "################################################## def Init() -> bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell", "vynutí se získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh])", "serveru\", ) ) parser = 
shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga", "0 id_mismatch = 0 print(\"=\" * 30) print(f\"Počet typů v datech (old): {len(api.looting.data)}\")", "\"aty1\": return True if input_letter in \"nf0\": return False def dialog_cislo(text: str =", "v datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for", "False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. %Y')}\" ) first", "short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser() subparsers = parser.add_subparsers( required=True, metavar=\"příkaz\", dest=\"cmd\",", "ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type", "inicializace nyní proběhne znovu...\" ) return Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode()", "if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return cls() count =", "pokusů bylo {count_invalid} z celkových {count_total}\") probrallity = (count_total - count_invalid) / count_total", "= api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass", "with get_io_file(CONFIG_FILE, True) as f: # Indent, protože chci, aby to šlo přehledně", "se ID! 
Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID", "\"check\": if os.path.exists(config_path): s = os.stat(config_path) rich_print( f\"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d.", "f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze", "(pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login =", "skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\")", "from rich.traceback import install as tb_install from urllib3.exceptions import InsecureRequestWarning # Takový hack", "command in args.commands: print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní", "= globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI", "keys: if isinstance(key, tuple): if key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key)", "fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(", "tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument(", "def finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## ##### FUNKCE ##### ##################################################", "argparser=parser, short_help=\"Spustí daný test\", 
spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\",", "default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se má spustit\", # dest=\"test\",", "je tedy možné pracovat se starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode()", "with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init: print(\"Vykonávám", "připraven - verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else", "threads.append(thread) for thread in threads: thread.join() print() autorun() else: rich_print( \"Autorun nebyl spuštěn", "flaga přítomna, úkoly budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command(", "\"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif key == \"z\": c =", "se počet záznamů pro typ {typ_old}! 
Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch +=", "rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else \"green\")]) def", "100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených", "x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first = False print(\"Dnešní", "KeyPress, done: Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme", "skrze export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání", "f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"save\": save_config()", "False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\",", "**kwargs): # `cast()` protože jsem zatím nepřišel na způsob, jak dostat hint při", "): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu...\" )", "False def dialog_cislo(text: str = \"\", default: int | None = None): print(text,", ") # Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si začne", "return False if args.username is None: try: args.username = input(\"Přihlašovací jméno: \") api.username", "zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]),", "módu. 
Pro přepnutí do online módu můžete zkusit příkaz \"init\".', color=\"yellow\", ) def", "< length: try: for _ in range(count): if offset >= length: break print(studenti[offset].format())", "\"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print( \"Server není dostupný; Uložená data byla", "session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"]", "api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH", "= True for znamka in filter( lambda x: min(lasttime, today - timedelta(5)) <", "\" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE,", "data, která jsou případně po JSONu, co jsme teď napsali (třeba pozůstatek po", "bakalariapi.SeleniumHandler | None = None if args.browser is not None: selenium = bakalariapi.SeleniumHandler(", "# cls() def Test6(): count_total = 0 count_invalid = 0 try: while True:", "{hotove + nehotove if zobraz_hotove else nehotove} ***\", ) count += 1 except", "<= timedelta(minutes=5): color = \"yellow\" elif is_before and delta <= timedelta(minutes=30): color =", "if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k serveru. 
Chcete pokračovat s nezabezpečeným", "print(\"Testování ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\") data =", "[x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"]", "# elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [", "pořádku # Pokud se ale spustí přes \"python main.py\" nebo \"python bakalarishell\" (kde", "Exception(f\"Undefined type '{type(obj)}' to show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None]", "def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva", "bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except", "if first: first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d.", "description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True tasks: list[Task] =", "# Odstraníme data, která jsou případně po JSONu, co jsme teď napsali (třeba", "běží a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme", "omezeném módu. 
Pro přepnutí do online módu můžete zkusit příkaz \"init\".', color=\"yellow\", )", "count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False):", ") if last < current: print(\"\\n\") break last = current time.sleep(1) print( \"Sezení", "nebo \"python bakalarishell\" (kde \"bakalarishell\" # je složka), tak relativní `import` selže (\"ImportError:", "je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def handler(keys_press:", "{length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool", "je to web scraper, ale API zní líp :)\", ) if parser.prog ==", "kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell():", "length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse(", "a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat", "bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE, False) as", ") if o is not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc()", "aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove = fast or", "is None else Console(file=file) if color is not None: # Pravděpodobně někdy bude", "################################################## ##### FUNKCE ##### ################################################## def rich_print( *objects: Any, sep: str = \"", "not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) 
api = bakalariapi.BakalariAPI(args.url, args.username, args.password,", "+= 1 if hotove + nehotove == 0: print(\"Nebyly nalezeny žádné aktualní úkoly\")", "shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato flaga přítomna, úkoly budou získány v", "while True: print( \"Současný zbývající čas: \" + str(last) + \" \" *", "else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else", "zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud", "except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False): def fresh(): if api.is_partial_init:", "str | None = None browser: str | None = None executable_path: str", "show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] | None, *, done_on_enter:", "first = True for komens in filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade)", "def patched(*args, **kwargs): # `cast()` protože jsem zatím nepřišel na způsob, jak dostat", "return # return API.GetHomeworksIDs() def Test4(): print(\"Tento test již není podporován... Sadge\") return", "loopu `KeyboardInterrupt` nepřichází. 
Příklad: ``` def handler(keys_press: KeyPress, done: Callable): if key_press.key ==", "total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0])", "def rich_print( *objects: Any, sep: str = \" \", end: str = \"\\n\",", "if not args.no_import: try: with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass", "nelze vykonat, jelikož shell se nachází v omezeném módu. Pro přepnutí do online", "highlight=False, ) if not (api.server_info.version is None) and not api.is_version_supported(): rich_print( \"*** Jiná", "{le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již není podporován... Sadge\") return # return", "úkoly budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\",", "v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí", "None else Console(file=file) if color is not None: # Pravděpodobně někdy bude problém,", "shell except ImportError: import shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell", "\"exit\" not in args.commands and (not args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0,", "+= 1 for id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(),", "= False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. 
%Y')} - {'[bright_black]Neznámý[/bright_black]'", "api.looting.get(bakalariapi.Grade), ): if first: first = False print(\"Poslední známky:\") note = znamka.note1.strip() or", "\"--force\", help=\"Pokud je tato flaga přítomna, neprovede se odlášení sessionů a aplikace se", "tak relativní `import` selže (\"ImportError: attempted relative import with no # known parent", "= 0 print(\"=\" * 30) print(f\"Počet typů v datech (old): {len(api.looting.data)}\") print(f\"Počet typů", "print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green]", "None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is None) and not api.is_version_supported():", "len(output) <= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\")", "function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True tasks: list[Task] = [ Task(\"Získání", "keys: # if key_press.key == Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt and", "type=int, help=\"Test, který se má spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\",", "parametr `done_on_enter` je `True`. 
done_on_enter: Pokud True, tak se při klávese Enter ukončí", "asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before", "<= timedelta(minutes=30): color = \"green\" print_keys( [(\"O - Otevře schůzku v prohlížeči\", color),", "== Keys.F4: # for key_press in keys: # if key_press.key == Keys.Escape: #", "task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length", "return True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k", "length = len(schuzky) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\") return", "nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning)", "Callable): def patched(*args, **kwargs): # `cast()` protože jsem zatím nepřišel na způsob, jak", "else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky získány ({length}),", "= False print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta]", "f: f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané příkazy...\") for command", "cmd == \"show\": if os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else:", "je # kvůli tomu, že ta zase pro změnu nefugnuje při importu jako", "if len(inpt) == 0: if default is None: continue return default input_letter =", "Studenta: \"), True) def Test5(): print(\"Tento test již není podporován... 
Sadge\") return #", "se funkce v parametru handler nevolá. mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen.", "s konfigurací se bude ignorovat, tudíž se brát v potaz pouze argumenty z", "předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. %Y')}\" ) first = True for schuzka in", "return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output =", "= False): print(\"Načítání úkolů...\") try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh", "x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na", "daný příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None,", "f\" - {note}\") ) first = True for komens in filter( lambda x:", "flush: bool = False, color: str | None = None, **kwargs, ): c", "delta <= timedelta(minutes=30): color = \"green\" print_keys( [(\"O - Otevře schůzku v prohlížeči\",", "with open(path, \"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE,", "False if args.url is None: try: args.url = input(\"URL adresa serveru: \") api.server_info.url", "print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již", "rich.get_console().print_exception() try: if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k serveru. Chcete pokračovat", "bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask):", "try: for _ in range(count): if offset >= length: break print(studenti[offset].format()) offset +=", "key_press: Zaznamenaný stisk klávesy. done: Funkce, která při zavolání ukončí záznam kláves. 
Pokud", "se `bakalarishell` spustí jako modul (= přes `import`), tak vše proběhne v pořádku", "Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path):", "rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace", "thread.join() print() autorun() else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", )", "export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch", "\"show\": if os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace", "= \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument", "se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, )", "f(*bound.args, **bound.kwargs) session.session.verify = False if login: session.login() return x return patched bakalariapi.sessions.RequestsSession.__init__", "1 for znamka in znamky: try: show(znamka, f\"*** Známka {count} z {length} ***\")", "\"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace", "IO: \"\"\"Vrátí file handler na daný soubor `file` v uživatelské (data) složce.\"\"\" path", "# Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si začne stěžovat", "homework.Done: # continue # print(\"*** Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") #", "encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as f: # Indent, protože chci, aby", "spread_arguments=True, ) ) shell_instance.add_command( 
shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") ) parser = shell.ShellArgumentParser()", "try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice()", "bound.arguments[\"login\"] bound.arguments[\"login\"] = False x = f(*bound.args, **bound.kwargs) session.session.verify = False if login:", "##### ################################################## def main(): global api global args def load_args_from_config() -> dict |", "asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední řádku je:", "= current time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo \" + str(last) +", "print(\"Úkol označen jako hotový\") elif key == \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako", "args.no_import: try: with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test", "do které se passují zaznamenané klávesy. Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy.", "pokud máme asynchoní funkci, lepší řešení pro poslední řádku je: ``` await keyhandler(handler)", "get_io_file(file: str, create_file: bool, mode: str = \"r+\") -> IO: \"\"\"Vrátí file handler", "print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False): def fresh(): if api.is_partial_init: partial_init_notice() return", "napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší jak náš současný) f.truncate()", "pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento", "\"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None,", "= input(\"Přihlašovací jméno: \") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\",", "je `True`. 
done_on_enter: Pokud True, tak se při klávese Enter ukončí záznam kláves.", "if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m, t)() rich_print( f\"Test", "< current: print(\"\\n\") break last = current time.sleep(1) print( \"Sezení bylo prodlouženo, když", "\"\" if default is None else f\"({default})\") while True: inpt = input() if", "neprofiltrují # a nelze pro daný argument načíst hodnotu z configu (protože hodnota", "poslední řádku je: ``` await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt =", "[ logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False", "== '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None", "obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif key == \"z\": c = Console()", "tak to neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep,", "nebyla načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}'", "False if args.username is None: try: args.username = input(\"Přihlašovací jméno: \") api.username =", "elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H", "= datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task:", "rich.console import Console from rich.logging import RichHandler from rich.progress import BarColumn, Progress, TaskID,", "' (Ne)')}: \" while True: # ano/true/yes/1 / ne/false/no/0 if color is not", "current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current: 
print(\"\\n\") break", "specifikovaný pomocí '-b'\", dest=\"executable_path\", default=None, ) parser.add_argument( \"-t\", \"--test\", type=int, help=\"Test, který se", "Bakalářů! Všechny funkce nemusejí fungovat správně! ***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int", "%Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else \"\" )", "length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask):", "bylo prodlouženo na \" + str(current) ) except KeyboardInterrupt: print(\"Testování ukončeno\") def Test1():", "list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\",", "spustit\", # dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga", "if default is None: continue return default input_letter = inpt[0].lower() if input_letter in", "api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if", "Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\",", "Command_Schuzky(force_fresh: bool = False): def fresh(): if api.is_partial_init: partial_init_notice() return [] output =", "error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le = len(error) print( f\"Úspěšné pokusy:", "= len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count = 1 for znamka in", "se 
prázdné heslo\", color=\"yellow\" ) args.password = \"\" api.password = args.password try: rich_print(", "zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}\" + (", "= session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný zbývající čas: \"", "je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid}", "bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá za běhu\",", "heslo; Pokud není tento argument přítomen, program se zeptá za běhu\", dest=\"password\", default=None,", "z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná", "# Verbose: # 0 - Nic # 1 - Warning; Pouze BakalářiAPI #", "%Y')}\" ) first = True for schuzka in filter( lambda x: today_aware <", "[magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. %Y')}\" ) first = True for schuzka in filter(", "import with no # known parent package\") a `shell` se naimportuje \"přímo\" (resp.", "handler(keys_press: KeyPress, done: Callable): if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud", "tudíž se můžeme dotazovat (jen) přes `in` if not (\"disable_config\" in parsed): from_config", "first = False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. 
%m.')} -", "načtou a tím i zrychlí proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens", "známek\", task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), )", "for komens in filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first:", "return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na `False`", "first: first = False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. %m.')}", "None = None, visible: bool | None = None, refresh: bool = False,", "argument přítomen, program se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"heslo\",", "módu můžete zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( text: str = \"\",", "if dialog_ano_ne( \"Server není dostupný; Chce importovat uložená data?\", True, \"yellow\", ): Command_Import()", "'' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else", "count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) ################################################## ##### MAIN", "spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je", "help=\"Limituje počet zpráv, které se načtou a tím i zrychlí proces\", ) shell_instance.add_command(", "None: rich_print(message, end=\"\", color=color) inpt = input() else: inpt = input(message) if len(inpt)", "lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) 
task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1)", "offset < length: try: for _ in range(count): if offset >= length: break", "zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( text: str = \"\", default: bool", "try: args.url = input(\"URL adresa serveru: \") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla", "tím, proč zde tedy není jen druhá možnost, tak to je # kvůli", "je tato flaga přítomna, spustí se automatické úlohy\", action=\"store_true\", dest=\"auto_run\", default=None, ) parser.add_argument(", "test {ID}\") try: o = getattr(m, t)() rich_print( f\"Test {ID} skončil\" + (\"\"", "str = \"\", default: int | None = None): print(text, \"\" if default", "o = getattr(m, t)() rich_print( f\"Test {ID} skončil\" + (\"\" if o is", "\"save\": save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla", "# Pravděpodobně někdy bude problém, že se vše převádí na string, ale zatím", "jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}' byla načtena\")", "None else (' (Ano)' if default else ' (Ne)')}: \" while True: #", "\" while True: # ano/true/yes/1 / ne/false/no/0 if color is not None: rich_print(message,", "inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import() -> bool: try: if args.no_import:", "field(default_factory=list) args: Args class RichTask: def __init__(self, progress: Progress, task_id: TaskID) -> None:", "Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který", "ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\"", "input(\"Přihlašovací 
jméno: \") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\")", "hack na to, aby `bakalarishell` šel spustit také přímo ze zdrojové složky #", "last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný zbývající čas:", "for znamka in znamky: try: show(znamka, f\"*** Známka {count} z {length} ***\") count", ") parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro", "la = len(IDs) print(f\"IDčka online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = []", "return default input_letter = inpt[0].lower() if input_letter in \"aty1\": return True if input_letter", "což už je v pořádku. # Pozn.: Pokud někdo dumá nad tím, proč", "fresh() -> list[bakalariapi.Komens]: if api.is_partial_init: partial_init_notice() return [] output: list[bakalariapi.Komens] = [] with", "x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci", "Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") #", "získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser,", "and delta <= timedelta(minutes=30): color = \"green\" print_keys( [(\"O - Otevře schůzku v", "api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu serveru, ale žádné funkce", "WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru", "parametru handler nevolá. mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen. 
Pokud `False`, `KeyboardInterrupt`", "api.looting.get(bakalariapi.Meeting), ): if first: first = False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M", "task: RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved", "importovat uložená data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print( \"Server není", "asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def handler(keys_press: KeyPress, done: Callable): if key_press.key", "v \"parsed\" tak má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in`", "patch(f: Callable): def patched(*args, **kwargs): # `cast()` protože jsem zatím nepřišel na způsob,", "# continue # print(\"*** Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro", "jak náš současný) f.truncate() print(f\"JSON data vygenerována a zapsána do souboru '{file_name}'\") def", "shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo,", "default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného souboru\",", "20, end=\"\\r\" ) # Some spaces to rewrite previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) )", "################################################## def main(): global api global args def load_args_from_config() -> dict | None:", "předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first = True for znamka", "else [] for key in keys: if isinstance(key, tuple): if key[1] == \"\":", "\"_lasttime\" @dataclass class Args: url: str | None = None username: str |", "New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if", "= False commands: list[str] = field(default_factory=list) args: Args class RichTask: def __init__(self, progress:", "Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt:", "probrallity = (count_total - count_invalid) / count_total * 100 print(\"Konečná ravděpodobnost úspěšnosti je", "for ukol in ukoly: if ukol.done: hotove += 1 else: nehotove += 1", "bakalarishell\" (kde \"bakalarishell\" # je složka), tak relativní `import` selže (\"ImportError: attempted relative", "except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try: studenti", ") parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command(", "False, force_fresh: bool = False): print(\"Načítání úkolů...\") try: if fast: ukoly = api.get_homeworks(", "from . import shell except ImportError: import shell tb_install(show_locals=True) cls = shell.cls api:", "zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new: print( f\"Neshodující se", "%m. 
%Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first", "argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno", "x in bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None,", "as f: parsed = json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu", "task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ]", "ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except", "pass if args.test is not None: RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne", "os import threading import time import traceback import warnings import webbrowser from dataclasses", "KeyPress, done: Callable): key = key_press.key.lower() if key == \"h\": obj.mark_as_done(api, True) print(\"Úkol", "v potaz pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None, ) parser.add_argument( \"-c\",", "z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh:", "ze zdrojové složky # Pokud se `bakalarishell` spustí jako modul (= přes `import`),", "args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True)", "second=0, microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first", "threading import time import traceback import warnings import webbrowser from 
dataclasses import dataclass,", "from typing import IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi import platformdirs import", "warnings import webbrowser from dataclasses import dataclass, field from datetime import datetime, timedelta", "key_press.key == Keys.Enter: done() # elif key_press.key == Keys.F4: # for key_press in", "break def Command_Ukoly(fast: bool = False, force_fresh: bool = False): print(\"Načítání úkolů...\") try:", "else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky v Lootingu, zkouším", "os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož", "za běhu\", nargs=\"?\", default=None, ) parser.add_argument( metavar=\"jmeno\", help=\"Přihlašovací jméno; Pokud není tento argument", "`import` selže (\"ImportError: attempted relative import with no # known parent package\") a", "BakalářiAPI # 4 - Info # 5 - NOSET if args.verbose != 0:", "= getattr(m, t)() rich_print( f\"Test {ID} skončil\" + (\"\" if o is None", "_globals[\"i\"] = rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\"", "být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print(", "jako hotový\", \"\" if obj.done else \"green\"), \"N - Označí úkol jako nehotový\",", "Označí úkol jako nehotový\", \"Z - Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press:", "pracovat se starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if", "o is None else \"; Výsledek testu:\") ) if o is not None:", "default=None, ) parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna, nebude BakalariAPI instance", "else 
bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0", "'{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE)", "]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler", "= rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\",", "Pokud True, tak se při klávese Enter ukončí záznam kláves. Pozn.: Pokud True,", "api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new: print( f\"Neshodující se ID! Old:", "metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí se", "color is not None: # Pravděpodobně někdy bude problém, že se vše převádí", "if TYPE_CHECKING: from . import shell else: try: from . import shell except", "z argparse) parsed = {k: v for k, v in vars(parser.parse_args()).items() if v", "Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1 for id_old, obj_old, id_new, obj_new", "= True for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first", "first: first = False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. 
%m.", "c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True))", ") shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) )", "shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument(", "Actually je to web scraper, ale API zní líp :)\", ) if parser.prog", "úkoly?\") # cls() # for homework in homeworks: # if not zobrazHotove and", "is not None} # Jelikož hodnoty filtrujeme, tak pokud i po filtrování je", "jen druhá možnost, tak to je # kvůli tomu, že ta zase pro", "případně po JSONu, co jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud", "required=True, metavar=\"příkaz\", dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\",", "argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) )", "import logging.config import os import threading import time import traceback import warnings import", "progress.log(f\"Online schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally:", "print(f\"IDčka online schůzek získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress()", "thread = threading.Thread( target=task.function, args=( api, RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ),", "exportu\", 
default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z daného", "může být očividně i negativní if not is_before and delta >= timedelta(hours=-1): color", "{'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() -", "Domácí úkol {count} z {hotove + nehotove if zobraz_hotove else nehotove} ***\", )", "TIME_FILE = \"_lasttime\" @dataclass class Args: url: str | None = None username:", "filtrování je \"disable_config\" # v \"parsed\" tak má hodnotu `True`, tudíž se můžeme", "inicializaci\", color=\"yellow\", ) if \"exit\" not in args.commands and (not args.no_import or args.auto_run):", "save_config(): with get_io_file(CONFIG_FILE, True) as f: # Indent, protože chci, aby to šlo", "not x.done, api.looting.get(bakalariapi.Homework)): if first: first = False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu", "print(\"Generace JSON dat...\") with get_io_file(file_name, True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme", "současný) f.truncate() print(f\"JSON data vygenerována a zapsána do souboru '{file_name}'\") def Command_Import(file_name: str", "v tom případě # hledá modul `shell` jako \"globální\" modul (ne jako \"lokální\"", "default else ' (Ne)')}: \" while True: # ano/true/yes/1 / ne/false/no/0 if color", "as progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved =", "task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api)", "ID type (old): {type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch += 1 print(", "except FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}' 
neexistuje\", color=\"yellow\", ) else:", "tudíž se brát v potaz pouze argumenty z příkazové řádky\", action=\"store_true\", dest=\"disable_config\", default=None,", "meeting_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif", "ale API zní líp :)\", ) if parser.prog == \"\": parser.prog = \"bakalarishell\"", "když k tomu nejspíše nikdy nedojde) # (a navíc alespoň nemusí řešit formátování", "scraper, ale API zní líp :)\", ) if parser.prog == \"\": parser.prog =", "tedy není jen druhá možnost, tak to je # kvůli tomu, že ta", "count_total = 0 count_invalid = 0 try: while True: count_total += 1 output", "KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\" ) args.password =", "\"bakalarishell\" # je složka), tak relativní `import` selže (\"ImportError: attempted relative import with", ") else: print(f\"Data ze souboru '{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd", "relative import with no # known parent package\") a `shell` se naimportuje \"přímo\"", "# Všechny argumenty pro argparse MUSÍ mít \"default=None\", jinak se neprofiltrují # a", "is_before and delta <= timedelta(minutes=5): color = \"yellow\" elif is_before and delta <=", "pouze pokud parametr `done_on_enter` je `True`. 
done_on_enter: Pokud True, tak se při klávese", "1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch)", "def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for", "který se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True,", "je tato flaga přítomna, úkoly budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True,", "\"import\", Command_Import, argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\",", "s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení,", "byla vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace není uložená\") elif cmd ==", "from bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import create_input from prompt_toolkit.key_binding import KeyPress", "KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool = False): def fresh(): if api.is_partial_init: partial_init_notice()", "[ (\"H - Označí úkol jako hotový\", \"\" if obj.done else \"green\"), \"N", "if not os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, \"x\",", "výsledků najednou?\", 25) except KeyboardInterrupt: return offset = 0 cls() while offset <", "znovu...\" ) return Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode() return False except", "\"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()], ) 
logging.info( \"Logging zapnut na levelu %s", "appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE = \"_lasttime\" @dataclass class Args: url:", "elif cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\": if os.path.exists(config_path):", "proběhne v pořádku # Pokud se ale spustí přes \"python main.py\" nebo \"python", "datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old", ") def finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## ##### FUNKCE #####", "se starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if args.url", "None executable_path: str | None = None verbose: int = 0 test: int", "except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count", "show( ukol, f\"*** Domácí úkol {count} z {hotove + nehotove if zobraz_hotove else", "print(\"Žádná konfigurace není uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif cmd", "New: {new_id_len}\" ) id_len_mismatch += 1 for id_old, obj_old, id_new, obj_new in zip(", "print() rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\"", "ukol in ukoly: try: if not zobraz_hotove and ukol.done: continue cls() show( ukol,", "Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\",", "argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu, který se má spustit\")", "akci nelze vykonat, jelikož shell se nachází v omezeném módu. 
Pro přepnutí do", "59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek získany ({la})\") print()", "= json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano,", "None, **kwargs, ): c = rich.get_console() if file is None else Console(file=file) if", "# Pokud se ale spustí přes \"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\"", "def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI,", "_globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance = shell.Shell( #", "tak pokud i po filtrování je \"disable_config\" # v \"parsed\" tak má hodnotu", "traceback import warnings import webbrowser from dataclasses import dataclass, field from datetime import", "dotazovat (jen) přes `in` if not (\"disable_config\" in parsed): from_config = load_args_from_config() if", "žádné funkce by tímto neměli být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo()", "rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\" ) args.password = \"\"", "= api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné zprávy v Lootingu, zkouším načíst ze", "rich_print( f\"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze", "color is not None: rich_print(message, end=\"\", color=color) inpt = input() else: inpt =", "uložená\") elif cmd == \"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname):", "TimeRemainingColumn from rich.syntax import Syntax from rich.traceback import install as tb_install from urllib3.exceptions", "alespoň nemusí řešit formátování při \"config show\") json.dump(args.__dict__, f, indent=4) def 
disable_ssl(): def", "0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\")", "'' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else", "api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice()", "import asyncio import getpass import inspect import json import logging import logging.config import", "v Lootingu, zkouším načíst ze serveru\") schuzky = fresh() length = len(schuzky) if", "k, v in vars(parser.parse_args()).items() if v is not None} # Jelikož hodnoty filtrujeme,", "await evnt.wait() def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str,", "typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()):", "print(text, \"\" if default is None else f\"({default})\") while True: inpt = input()", "- Označí úkol jako hotový\", \"\" if obj.done else \"green\"), \"N - Označí", "print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\") probrallity", "- Nic # 1 - Warning; Pouze BakalářiAPI # 2 - Info; Pouze", "None): if title is not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P", "False disable_config: bool = False commands: list[str] = field(default_factory=list) args: Args class RichTask:", ").get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) def 
task_znamky(api: bakalariapi.BakalariAPI, task:", "else: rich_print( \"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not", "finally: le = len(error) print( f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le};", "rich_print( *objects: Any, sep: str = \" \", end: str = \"\\n\", file:", "args=( api, RichTask( progress, progress.add_task( task.description, start=task.start, total=0 ), ), ), ) thread.start()", "length: break print(studenti[offset].format()) offset += 1 input( f\"Pro pokračování stiskni klávasu... (Již zobrazeno", "if len(output) <= 20: count_invalid += 1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo", "help=\"ID testu, který se má spustit\") shell_instance.add_command( shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný", "Command_Ukoly(fast: bool = False, force_fresh: bool = False): print(\"Načítání úkolů...\") try: if fast:", "+= 1 print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid}", "New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})\" )", "***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") # cls() def", "in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance", "except ImportError: import shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs", "smysl pouze pokud parametr `done_on_enter` je `True`. done_on_enter: Pokud True, tak se při", "- Otevře schůzku v prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"] ) def", "tak se při klávese Enter ukončí záznam kláves. 
Pozn.: Pokud True, tak se", ") subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz", "záznam kláves. Pozn.: Pokud True, tak se funkce v parametru handler nevolá. mask_keyboard_interrupt:", "ukončeno\") def Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\") data = api.looting.export_data()", "'{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})\" ) id_mismatch", "float | None = None, advance: float | None = None, description: str", "co jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší jak", "is None) and not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! Všechny funkce nemusejí", "úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key", "with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) )", "# raise SystemExit elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif", "tomu, že ta zase pro změnu nefugnuje při importu jako modul, jelikož v", "isinstance(key, tuple): if key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output))", "lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read())", "po filtrování je \"disable_config\" # v \"parsed\" tak má hodnotu `True`, tudíž se", "if len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané příkazy...\") for command in args.commands:", "- timedelta(5)) < x.time, api.looting.get(bakalariapi.Komens), ): if first: first = False print(\"Komens zprávy:\")", "typing import IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi import platformdirs import requests", 
"else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is None) and not api.is_version_supported(): rich_print(", "self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def update( self, total: float | None", "def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if", "fresh() length = len(schuzky) if length == 0: print(\"Nebyly nalezeny žádné aktualní schůzky\")", "time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\") probrallity =", "f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\"", "jsou případně po JSONu, co jsme teď napsali (třeba pozůstatek po předchozím JSONu,", "Pozn.: Pokud True, tak se funkce v parametru handler nevolá. 
mask_keyboard_interrupt: Pokud `True`,", "z configu # se přepíše hodnotou \"None\" z argparse) parsed = {k: v", "RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved))", "BakalářiAPI # 3 - Debug; Pouze BakalářiAPI # 4 - Info # 5", "\"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not in args.commands", "- timedelta(5)) < x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first: first", "f\"{text} Ano/Ne{'' if default is None else (' (Ano)' if default else '", "daný test\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je", "nedojde) # (a navíc alespoň nemusí řešit formátování při \"config show\") json.dump(args.__dict__, f,", "refresh=refresh, **fields, ) def finish(self): task = self.progress.tasks[self.task_id] task.finished_time = 0 ################################################## #####", "len(error) print( f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" )", ") parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí se automatické úlohy\",", "Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework):", "None]], None] | None, *, done_on_enter: bool = True, mask_keyboard_interrupt: bool = False,", "with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO)", "ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( 
\"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\", spread_arguments=True,", "KeyPress, done: Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler))", "jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v", "not zobrazHotove and homework.Done: # continue # print(\"*** Domácí úkol ***\") # print(homework.Format())", "{len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in", "not args.no_init: successful_init = Init() if not args.no_import: try: with get_io_file(\"main\", False) as", "elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj,", "= Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise Exception(f\"Undefined type '{type(obj)}' to show\") async", "= api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch = 0", "data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. 
%Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace", "if not (api.server_info.version is None) and not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů!", "dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell", "logger in [ logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate", "zase pro změnu nefugnuje při importu jako modul, jelikož v tom případě #", "{datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace není", "Test1(): # \"Kopírování\" print(\"Vytváření kopie dat skrze export/import...\") data = api.looting.export_data() new =", "len(zpravy) == 0: print(\"Žádné zprávy v Lootingu, zkouším načíst ze serveru\") zpravy =", "lambda x: today_aware < x.start_time and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ):", "import KeyPress from prompt_toolkit.keys import Keys from rich.console import Console from rich.logging import", "ukol.done: hotove += 1 else: nehotove += 1 if hotove + nehotove ==", ") return Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException:", "shell_instance predefined_commands = [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"]", "\"-t\", \"--test\", type=int, help=\"Test, který se má spustit\", # dest=\"test\", metavar=\"ID\", default=None, )", "api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class", ") current = 
float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current: print(\"\\n\")", "shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True,", "- Debug; Pouze BakalářiAPI # 4 - Info # 5 - NOSET if", "schuzky: try: show(schuzka, f\"*** Schůzka {count} z {length} ***\") count += 1 cls()", "{'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \\\"{schuzka.name.strip()}\\\"\" ) first = True for", "KeyboardInterrupt: partial_init_mode() return False if args.url is None: try: args.url = input(\"URL adresa", "True: inpt = input() if not inpt: if default is None: continue return", "v pořádku # Pokud se ale spustí přes \"python main.py\" nebo \"python bakalarishell\"", "KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return False if args.username is None:", "ukol, f\"*** Domácí úkol {count} z {hotove + nehotove if zobraz_hotove else nehotove}", "nachází v omezeném módu. 
Pro přepnutí do online módu můžete zkusit příkaz \"init\".',", "args.password, selenium) successful_init = False if not args.no_init: successful_init = Init() if not", "\"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední", "aktualní schůzky\") return cls() count = 1 for zprava in zpravy: try: show(zprava,", "task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try:", "tedy možné pracovat se starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return", "print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\") probrallity = (count_total - count_invalid)", "None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None", "start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved:", "je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. 
%Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" )", "str | None = None executable_path: str | None = None verbose: int", "key_press in keys: # if key_press.key == Keys.Escape: # raise SystemExit elif not", "= api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <= 20: count_invalid +=", "color = \"yellow\" elif is_before and delta <= timedelta(minutes=30): color = \"green\" print_keys(", "totiž může být očividně i negativní if not is_before and delta >= timedelta(hours=-1):", "total: float | None = None, completed: float | None = None, advance:", "if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení zprávy\", \"\" if obj.confirmed", "def Command_Ukoly(fast: bool = False, force_fresh: bool = False): print(\"Načítání úkolů...\") try: if", "): if first: first = False print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip()", "konfigurací se bude ignorovat, tudíž se brát v potaz pouze argumenty z příkazové", "def Test6(): count_total = 0 count_invalid = 0 try: while True: count_total +=", "short_help=\"Zobrazí (nadcházející) schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]),", "zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky),", "se nachází v omezeném módu. 
Pro přepnutí do online módu můžete zkusit příkaz", "def Test2(): print(\"Získávám IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1,", ") shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\",", "= dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt: return offset = 0 cls()", "bool = False): print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else", "\"--command\", help=\"Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát\", action=\"append\",", "hint při patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args,", "except KeyboardInterrupt: rich_print( \"\\nHeslo nebylo zadáno, předpokládá se prázdné heslo\", color=\"yellow\" ) args.password", "testu:\") ) if o is not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\")", "color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create(", "python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh =", "delta = obj.start_time_delta color = \"\" # Delta totiž může být očividně i", "not (api.server_info.version is None) and not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! Všechny", "pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již není podporován... 
Sadge\")", "= len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api)", "{k: v for k, v in vars(parser.parse_args()).items() if v is not None} #", "prompt_toolkit.input import create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys from rich.console", "bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return", "def disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs): # `cast()` protože jsem zatím", "zbývající čas: \" + str(last) + \" \" * 20, end=\"\\r\" ) #", "None = None executable_path: str | None = None verbose: int = 0", "None verbose: int = 0 test: int | None = None auto_run: bool", "color = \"green\" print_keys( [(\"O - Otevře schůzku v prohlížeči\", color), \"Z -", "= lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in keys: if done_on_enter and", "= 0 try: while True: count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False,", "task.update(advance=1) @dataclass class Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool =", "a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. 
%Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is", "args.url is None: try: args.url = input(\"URL adresa serveru: \") api.server_info.url = args.url", "== \"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\")", "= False): print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH", "to web scraper, ale API zní líp :)\", ) if parser.prog == \"\":", "session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] ) if last < current: print(\"\\n\") break last = current", "\"Logging zapnut na levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4:", "které se passují zaznamenané klávesy. Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy. done:", "velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\") elif cmd ==", "| None = None, color: str | None = None ) -> bool:", "dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if", "= load_args_from_config() if from_config is not None: parsed = from_config | parsed args", "microsecond=0) ) first = True for znamka in filter( lambda x: min(lasttime, today", "rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. 
%Y')}\" + ( \"", "if args.no_import: if dialog_ano_ne( \"Server není dostupný; Chce importovat uložená data?\", True, \"yellow\",", "= [\"Enter - Pokračování\"] if enter_pokracovani else [] for key in keys: if", "Pokud se ale spustí přes \"python main.py\" nebo \"python bakalarishell\" (kde \"bakalarishell\" #", "ale žádné funkce by tímto neměli být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\")", "= None, visible: bool | None = None, refresh: bool = False, **fields,", "hodnota z configu # se přepíše hodnotou \"None\" z argparse) parsed = {k:", "Task(\"Získání známek\", task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(),", "for task in tasks: thread = threading.Thread( target=task.function, args=( api, RichTask( progress, progress.add_task(", "správně! ***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int | None = None, force_fresh:", ").json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný zbývající čas: \" + str(last) +", "navázat zabezpečené připojení k serveru. 
Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ):", "print( f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def", "# 1 - Warning; Pouze BakalářiAPI # 2 - Info; Pouze BakalářiAPI #", "protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány", "return hotove = 0 nehotove = 0 for ukol in ukoly: if ukol.done:", "subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\",", "bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\")", "if input_letter in \"aty1\": return True if input_letter in \"nf0\": return False def", "verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" )", "získany ({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as progress: task", "), ), ), ) thread.start() threads.append(thread) for thread in threads: thread.join() print() autorun()", "and x.start_time < today_aware + timedelta(2), api.looting.get(bakalariapi.Meeting), ): if first: first = False", "spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato flaga", "( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first = True for znamka", "z {znamka.time.strftime('%H:%M %d. %m. 
%Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not", "\"main\"): try: with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla", "Warning; Pouze BakalářiAPI # 2 - Info; Pouze BakalářiAPI # 3 - Debug;", "selenium) successful_init = False if not args.no_init: successful_init = Init() if not args.no_import:", "| None = None, completed: float | None = None, advance: float |", "== '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == ''", "f\"*** Schůzka {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\")", "for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old != typ_new: print(f\"Neshodující se typy!", "self.task_id, total=total, completed=completed, advance=advance, description=description, visible=visible, refresh=refresh, **fields, ) def finish(self): task =", "description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... Actually je to web scraper, ale", "= shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\",", "rewrite previous text... 
session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] )", "force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti získáni,", "konfiguraci\", ) subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser,", "None: continue return default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str,", "aby to šlo přehledně upravit i z editoru (i když k tomu nejspíše", "handler(key_press, done) with inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) ->", "partial_init_notice() return length = len(studenti) print(f\"Studenti získáni, počet studentů je {length}\") try: count", "Pouze BakalářiAPI # 2 - Info; Pouze BakalářiAPI # 3 - Debug; Pouze", "Progress, task_id: TaskID) -> None: self.progress = progress self.task_id = task_id def start(self):", "output = [] with Progress() as progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\",", "[magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first = True for znamka in", "za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser],", "MAIN ##### ################################################## def main(): global api global args def load_args_from_config() -> dict", "default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna, nebude proveden import dat", "\"disable_config\" # v \"parsed\" tak má hodnotu `True`, tudíž se můžeme dotazovat (jen)", "successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length = len( api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True) ) task.update(total=length,", "lambda x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade),", "metavar=\"heslo\", nargs=\"?\", help=\"Přihlašovací heslo; Pokud není tento argument přítomen, program se zeptá za", "except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands = [x for x", ") subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené", "'{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new])", "= [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print", "package\") a `shell` se naimportuje \"přímo\" (resp. 
ne relativně), což už je v", "# 2 - Info; Pouze BakalářiAPI # 3 - Debug; Pouze BakalářiAPI #", "+= 1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice)", "pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key ==", "None = None, color: str | None = None ) -> bool: message", "list[tuple[str, str] | str], enter_pokracovani=True): output = [\"Enter - Pokračování\"] if enter_pokracovani else", "byla již importována, je tedy možné pracovat se starými daty\", color=\"yellow\", ) partial_init_mode()", "try: while True: count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False,", "znamka in filter( lambda x: (x.need_confirm and not x.confirmed) or min(lasttime, today -", "souboru '{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path =", "\"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\") elif cmd ==", "help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser( \"remove\", help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí", "v omezeném módu. Pro přepnutí do online módu můžete zkusit příkaz \"init\".', color=\"yellow\",", "None = None): if title is not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True))", "chci, aby to šlo přehledně upravit i z editoru (i když k tomu", "return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"), True) def Test5(): print(\"Tento test již", "[yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE, True)", "evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in keys: if done_on_enter and key_press.key ==", "naimportuje \"přímo\" (resp. 
ne relativně), což už je v pořádku. # Pozn.: Pokud", "daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if args.url is None:", "= inpt[0].lower() if input_letter in \"aty1\": return True if input_letter in \"nf0\": return", "shell.Command( \"test\", RunTest, argparser=parser, short_help=\"Spustí daný test\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser(parents=[parser_fresh])", "z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\") )", "first = False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d.", "short_help=\"Ukončí shell\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno exportu\",", "is None: continue return default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys:", "= \"yellow\" elif is_before and delta <= timedelta(minutes=30): color = \"green\" print_keys( [(\"O", "Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session: try: while True: last", "print(f\"Počet typů v datech (old): {len(api.looting.data)}\") print(f\"Počet typů v datech (new): {len(api.looting.data)}\") print(\"Porovnávání", "parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\", Command_Import, argparser=parser,", "= \"\" # Delta totiž může být očividně i negativní if not is_before", "f\"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru", "partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne( 
\"Nepodařilo se navázat", "rich_print( 'Tuto akci nelze vykonat, jelikož shell se nachází v omezeném módu. Pro", "(po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty pro", "None = None, description: str | None = None, visible: bool | None", "{note}\") ) first = True for komens in filter( lambda x: x.grade ==", "proces\", ) shell_instance.add_command( shell.Command( \"komens\", Command_Komens, short_help=\"Zobrazí komens zprávy\", argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], )", "args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x", "Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů:", "bool = False): print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else", "pokračování stiskni klávasu...\") # cls() def Test6(): count_total = 0 count_invalid = 0", "\"Server není dostupný; Chce importovat uložená data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode()", "= AnoNeDialog(\"Chte zobrazit již hotové úkoly?\") # cls() # for homework in homeworks:", "dest=\"test\", metavar=\"ID\", default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí", "parser.add_argument( \"-n\", \"--no-init\", help=\"Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována\",", "prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True,", "# return API.MarkHomeworkAsDone(input(\"ID Úkolu: 
\"), input(\"ID Studenta: \"), True) def Test5(): print(\"Tento test", "if args.test is not None: RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne `prepare_shell()`", "bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if len(output) <= 20: count_invalid += 1 print(\"==============================\")", "return ask_import() except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt:", "name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False # logging.getLogger(\"bakalariapi\").propagate =", "################################################## def RunTest(ID: int): m = __import__(__name__) t = f\"Test{ID}\" if hasattr(m, t):", "= None): if title is not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\")", "class RichTask: def __init__(self, progress: Progress, task_id: TaskID) -> None: self.progress = progress", "přepíše hodnotou \"None\" z argparse) parsed = {k: v for k, v in", "bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False", "/ count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except", "`ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession, args[0]) bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"]", "shell_instance = shell.Shell( # prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(),", "str | None = None): if title is not None: 
print(title) if isinstance(obj,", "False x = f(*bound.args, **bound.kwargs) session.session.verify = False if login: session.login() return x", "uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není", "(Ano)' if default else ' (Ne)')}: \" while True: # ano/true/yes/1 / ne/false/no/0", "bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=True, ) else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False,", "online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids( api, datetime(1, 1, 1), datetime(9999, 12, 31,", "daný soubor `file` v uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path):", "unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start:", "for key_press in keys: # if key_press.key == Keys.Escape: # raise SystemExit elif", "None browser: str | None = None executable_path: str | None = None", "se přepíše hodnotou \"None\" z argparse) parsed = {k: v for k, v", "help=\"Odstraní uloženou konfiguraci\", ) subparsers.add_parser( \"check\", help=\"Zobrazí údaje o uložené konfiguraci\", ) subparsers.add_parser(", ") else: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False,", "\"x\", encoding=\"utf-8\"): pass return open(path, mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as", "***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool =", "shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga přítomna, neprovede se odlášení sessionů", "str = \"\", 
default: bool | None = None, color: str | None", "None = None if args.browser is not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path,", "složce), tudíž selže. if TYPE_CHECKING: from . import shell else: try: from .", "x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first: first = False print(\"Nadcházející klasifikace:\")", "warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu serveru, ale žádné", "budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly,", "Potrvrdí přečtení zprávy\", \"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable):", "api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length =", "\"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"-f\",", "= (count_total - count_invalid) / count_total * 100 print(\"Pravděpodobnost úspěšnosti je %.2f%%\" %", "if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True):", "rich.get_console() if file is None else Console(file=file) if color is not None: #", "= False disable_config: bool = False commands: list[str] = field(default_factory=list) args: Args class", "IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID} se", "None, *, done_on_enter: bool = True, mask_keyboard_interrupt: bool = False, ): \"\"\" Začne", "True, tak se při klávese Enter ukončí záznam kláves. 
Pozn.: Pokud True, tak", "dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne shell v 'ukecaném módu'; Lze opakovat", "asynchoní funkci, lepší řešení pro poslední řádku je: ``` await keyhandler(handler) ``` \"\"\"", "studentů je {length}\") try: count = dialog_cislo(\"Kolik zobrazit výsledků najednou?\", 25) except KeyboardInterrupt:", "as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run: if successful_init: def", ") ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", ) ) parser", "id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch,", "Označí úkol jako hotový\", \"\" if obj.done else \"green\"), \"N - Označí úkol", "prepare_shell() # Chceme `main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals() print() rich_print(", "input(\"Pro pokračování stiskni klávasu...\") # cls() def Test6(): count_total = 0 count_invalid =", "`shell` jako \"globální\" modul (ne jako \"lokální\" ve složce), tudíž selže. 
if TYPE_CHECKING:", "že ta zase pro změnu nefugnuje při importu jako modul, jelikož v tom", "možné pracovat se starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False", "v omezeném módu.\\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.\",", "nehotový\", \"Z - Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done: Callable):", "def fresh(): if api.is_partial_init: partial_init_notice() return [] output = [] with Progress() as", "módu zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import() -> bool:", "is not None: rich_print(o) except: rich_print(\"Test skončil neúspěchem:\", color=\"red\") traceback.print_exc() else: rich_print(f\"Test {ID}", "hodnotu z configu (protože hodnota z configu # se přepíše hodnotou \"None\" z", "delší jak náš současný) f.truncate() print(f\"JSON data vygenerována a zapsána do souboru '{file_name}'\")", "serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not api.is_login_valid():", "done() # elif key_press.key == Keys.F4: # for key_press in keys: # if", "is_before = obj.is_before_start delta = obj.start_time_delta color = \"\" # Delta totiž může", "a `shell` se naimportuje \"přímo\" (resp. ne relativně), což už je v pořádku.", "# known parent package\") a `shell` se naimportuje \"přímo\" (resp. 
ne relativně), což", "cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path): with", "only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove = 0 for", "to problém není, tak to neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects))", "task.update(advance=1) except KeyboardInterrupt: pass finally: le = len(error) print( f\"Úspěšné pokusy: {la -", "opakovat vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty pro argparse MUSÍ mít", "in filter( lambda x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade !=", "f\"Bakalarishell připraven - verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__", "import create_input from prompt_toolkit.key_binding import KeyPress from prompt_toolkit.keys import Keys from rich.console import", "= shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"import\",", "KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}\") probrallity = (count_total -", "pokud i po filtrování je \"disable_config\" # v \"parsed\" tak má hodnotu `True`,", "argparse import asyncio import getpass import inspect import json import logging import logging.config", "def start(self): self.progress.start_task(self.task_id) def update( self, total: float | None = None, completed:", "- Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress, done: Callable): key =", "config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path): with open(config_path, \"r\") as", "if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" 
f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if", "zkouším načíst ze serveru\") zpravy = fresh() length = len(zpravy) if length ==", "timedelta from typing import IO, TYPE_CHECKING, Any, Callable, cast import bakalariapi import platformdirs", "urllib3.exceptions import InsecureRequestWarning # Takový hack na to, aby `bakalarishell` šel spustit také", "Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh", "color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\")", "return length = len(znamky) print(f\"Známky získány ({length}), zobrazuji...\") cls() count = 1 for", "skončil\" + (\"\" if o is None else \"; Výsledek testu:\") ) if", "{'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k", "shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID", "from rich.logging import RichHandler from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax", "except FileNotFoundError: pass if args.auto_run: if successful_init: def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask): length", "omezeném módu.\\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\",", "do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import()", "= None): print(text, \"\" if default is None else f\"({default})\") while True: inpt", "| None = None password: str | None = None browser: str |", "print(\"*** Domácí úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro 
pokračování stiskni klávasu...\")", "in keys: # if key_press.key == Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt", "task.update(advance=1) def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask): length = len(api.get_all_grades()) task.update(total=length, completed=length) def task_schuzky(api:", "u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka", "except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove = 0 for ukol in", "except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a", "= None password: str | None = None browser: str | None =", "zprava in zpravy: try: show(zprava, f\"*** Zpráva {count} z {length} ***\") count +=", "print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta color = \"\" # Delta totiž", "args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4: for logger in [ logging.getLogger(name) for", "if hotove + nehotove == 0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly", "except KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool = False, force_fresh: bool = False):", "neřeším eShrug objects = tuple(map(lambda x: f\"[{color}]{x}[/{color}]\", objects)) return c.print(*objects, sep=sep, end=end, **kwargs)", "\"[progress.description]{task.description}\", BarColumn(), \"[progress.percentage]{task.percentage:>3.0f}%\", \"{task.completed}/{task.total}\", TimeRemainingColumn(), ) as progress: threads: list[threading.Thread] = [] for", "stiskni klávasu...\") # cls() def Test6(): count_total = 0 count_invalid = 0 try:", "({la})\") print() error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as progress: task =", "jinak se neprofiltrují # a nelze pro daný argument načíst hodnotu z 
configu", "with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file)", "Syntax from rich.traceback import install as tb_install from urllib3.exceptions import InsecureRequestWarning # Takový", "u ID:\\t{id_mismatch}\" ) return (typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\")", "task_schuzky, False), Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun(): with Progress(", "in ukoly: if ukol.done: hotove += 1 else: nehotove += 1 if hotove", "timedelta(minutes=30): color = \"green\" print_keys( [(\"O - Otevře schůzku v prohlížeči\", color), \"Z", "rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\\nPro přepnutí do plného", "Chce importovat uložená data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print( \"Server", "return False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\")", "Odstraníme data, která jsou případně po JSONu, co jsme teď napsali (třeba pozůstatek", "if obj.done else \"green\"), \"N - Označí úkol jako nehotový\", \"Z - Zobrazí", "známky...\") try: znamky = api.get_grades( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError:", "| None = None, flush: bool = False, color: str | None =", "KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return False if args.password is None:", "***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int | None = None, force_fresh: bool", "{ID} skončil\" + (\"\" if o is None else \"; Výsledek testu:\") )", "{le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již není podporován... 
Sadge\") return", "Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True,", "api.session_manager.kill_all(False) print( \"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu...\" ) return Init()", "if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is None)", "force_fresh: bool = False): print(\"Načítání úkolů...\") try: if fast: ukoly = api.get_homeworks( bakalariapi.GetMode.FRESH", "= False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m.", "default=None, help=\"Limituje počet zpráv, které se načtou a tím i zrychlí proces\", )", "task_id: TaskID) -> None: self.progress = progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id)", "`KeyboardInterrupt` nepřichází. 
Příklad: ``` def handler(keys_press: KeyPress, done: Callable): if key_press.key == \"q\":", "přítomen, program se zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower()", "if not is_before and delta >= timedelta(hours=-1): color = \"red\" elif is_before and", "'init'.\", color=\"yellow\", ) def ask_import() -> bool: try: if args.no_import: if dialog_ano_ne( \"Server", "KeyboardInterrupt: print(\"\\n\") break def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try: studenti =", "hodnotou \"None\" z argparse) parsed = {k: v for k, v in vars(parser.parse_args()).items()", "test: int | None = None auto_run: bool = False no_init: bool =", "\"?\", api.looting.get(bakalariapi.Grade), ): if first: first = False print(\"Poslední známky:\") note = znamka.note1.strip()", "else: print(\"Žádná konfigurace není uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif", "\"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející)", "key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif isinstance(obj, bakalariapi.Student):", "BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax from rich.traceback import install as", "konfigurace není uložená\") elif cmd == \"save\": save_config() print(\"Konfigurace uložena\") elif cmd ==", "for homework in homeworks: # if not zobrazHotove and homework.Done: # continue #", "parser.add_argument( \"url\", help=\"URL na bakaláře (př. 
https://bakalari.skola.cz); Pokud není tento argument přítomen, program", "\"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser() parser.add_argument(\"ID\", help=\"ID testu,", "RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved))", "nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop() except (shell.DummyShellError, KeyboardInterrupt): Command_Konec(False)", "= \"r+\") -> IO: \"\"\"Vrátí file handler na daný soubor `file` v uživatelské", "def dialog_cislo(text: str = \"\", default: int | None = None): print(text, \"\"", "stiskni klávasu... (Již zobrazeno {offset} výsledků z {length})\" ) cls() except KeyboardInterrupt: print(\"\\n\")", "1 input( f\"Pro pokračování stiskni klávasu... 
(Již zobrazeno {offset} výsledků z {length})\" )", "Task(\"Získání úkolů\", task_ukoly), Task(\"Získání známek\", task_znamky), ] def autorun(): with Progress( \"[progress.description]{task.description}\", BarColumn(),", "as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která jsou případně po JSONu,", "rich_print(\"Server běží a přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): #", "se bude ignorovat, tudíž se brát v potaz pouze argumenty z příkazové řádky\",", "rychleji ukončí\", action=\"store_false\", default=True, dest=\"nice\", ) shell_instance.add_command( shell.Command( \"exit\", Command_Konec, argparser=parser, short_help=\"Ukončí shell\",", "přihlašovací údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`,", "schuzka in schuzky: try: show(schuzka, f\"*** Schůzka {count} z {length} ***\") count +=", "`True`. done_on_enter: Pokud True, tak se při klávese Enter ukončí záznam kláves. 
Pozn.:", "| None = None): print(text, \"\" if default is None else f\"({default})\") while", "f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash:", "id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(), api.looting.data[typ_old].values(), new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old", "): if first: first = False print(\"Komens zprávy:\") rich_print( f\"Komens zpráva od [magenta]{znamka.sender}[/magenta]", "print(\"Vytváření kopie dat skrze export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie", "= fresh() length = len(zpravy) if length == 0: print(\"Nebyly nalezeny žádné aktualní", "from prompt_toolkit.keys import Keys from rich.console import Console from rich.logging import RichHandler from", "import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax from rich.traceback import install", "= \"\", default: bool | None = None, color: str | None =", "True) as f: json.dump(api.looting.export_data(), f, ensure_ascii=False) # Odstraníme data, která jsou případně po", "in tasks: thread = threading.Thread( target=task.function, args=( api, RichTask( progress, progress.add_task( task.description, start=task.start,", "task.description, start=task.start, total=0 ), ), ), ) thread.start() threads.append(thread) for thread in threads:", "(api.server_info.version is None) and not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! Všechny funkce", "žádné aktualní schůzky\") return cls() count = 1 for schuzka in schuzky: try:", "completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if", "selže. if TYPE_CHECKING: from . import shell else: try: from . 
import shell", "= None, **kwargs, ): c = rich.get_console() if file is None else Console(file=file)", "{new_id_len}\" ) id_len_mismatch += 1 for id_old, obj_old, id_new, obj_new in zip( api.looting.data[typ_old].keys(),", "schůzku {ID.ID} se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le", ") -> bool: message = f\"{text} Ano/Ne{'' if default is None else ('", "z asyncio loopu `KeyboardInterrupt` nepřichází. Příklad: ``` def handler(keys_press: KeyPress, done: Callable): if", "try: if dialog_ano_ne( \"Nepodařilo se navázat zabezpečené připojení k serveru. Chcete pokračovat s", "shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT) _globals = globals() _globals[\"p\"] = rich_print _globals[\"i\"] = rich.inspect shell_instance =", "if args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose],", "return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"), True) def Test5(): print(\"Tento", "shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí", "return output if force_fresh: schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky)", "rich_print( f\"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}\" + (\"\"", "in bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, )", "finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le = len(error) print( f\"Úspěšné pokusy: {la", "Zaznamenaný stisk klávesy. done: Funkce, která při zavolání ukončí záznam kláves. 
Pokud je", "start: bool = True tasks: list[Task] = [ Task(\"Získání Komens zpráv\", task_komens, False),", "for schuzka in filter( lambda x: today_aware < x.start_time and x.start_time < today_aware", "x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ):", "create_file: bool, mode: str = \"r+\") -> IO: \"\"\"Vrátí file handler na daný", "try: args.username = input(\"Přihlašovací jméno: \") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno", "first: first = False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')}", ") # Všechny argumenty pro argparse MUSÍ mít \"default=None\", jinak se neprofiltrují #", "obj.start_time_delta color = \"\" # Delta totiž může být očividně i negativní if", "- Označí úkol jako nehotový\", \"Z - Zobrazí HTML úkolu\", ] ) def", "time import traceback import warnings import webbrowser from dataclasses import dataclass, field from", "try: with get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if", "new_id_len: print( f\"Neshodující se počet záznamů pro typ {typ_old}! 
Old: {old_id_len}; New: {new_id_len}\"", "na to, aby `bakalarishell` šel spustit také přímo ze zdrojové složky # Pokud", "[(\"O - Otevře schůzku v prohlížeči\", color), \"Z - Zobrazí HTML pozvánky\"] )", "t): rich_print(f\"Zahajuji test {ID}\") try: o = getattr(m, t)() rich_print( f\"Test {ID} skončil\"", "zabezpečené připojení, inicializace nyní proběhne znovu...\" ) return Init() else: return ask_import() except", "c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož shell", "\"file_name\", nargs=\"?\", help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje", "dest=\"cmd\", parser_class=shell.ShellArgumentParser, ) subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou", "if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove", "{length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool", "x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first: first = False print(\"Nadcházející klasifikace:\") rich_print(", "\"open\": dirname = os.path.dirname(config_path) # = dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít", "se typy! Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len = len(api.looting.data[typ_old])", "jelikož v tom případě # hledá modul `shell` jako \"globální\" modul (ne jako", "##### ################################################## def RunTest(ID: int): m = __import__(__name__) t = f\"Test{ID}\" if hasattr(m,", "is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. 
%Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční", "%Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\") elif", "while True: count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, )", "second=0, microsecond=0) ) first = True for znamka in filter( lambda x: min(lasttime,", "len(new.data[typ_new]) if old_id_len != new_id_len: print( f\"Neshodující se počet záznamů pro typ {typ_old}!", "argparser=parser, spread_arguments=True, aliases=[\"zpravy\"], ) ) shell_instance.add_command( shell.Command( \"znamky\", Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), )", "z daného souboru\", spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno", "serveru: \") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode()", "file is None else Console(file=file) if color is not None: # Pravděpodobně někdy", "else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try: with get_io_file(TIME_FILE, False) as f:", "- NOSET if args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\",", "== \"\" else f\" - {note}\") ) first = True for komens in", "= False print(\"Nadcházející klasifikace:\") rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. 
%Y')}\" )", "continue return default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str]", "spread_arguments=True, ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\",", "serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None,", "if key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci, lepší", "threads: list[threading.Thread] = [] for task in tasks: thread = threading.Thread( target=task.function, args=(", "isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start", "else Console(file=file) if color is not None: # Pravděpodobně někdy bude problém, že", "raise SystemExit elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC: raise KeyboardInterrupt elif handler", ") api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False if not args.no_init:", "schůzek\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in", "dirs.user_data_dir() if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ################################################## #####", "len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané příkazy...\") for command in args.commands: print(command)", "JSONu, co jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší", 
"print(f\"JSON data vygenerována a zapsána do souboru '{file_name}'\") def Command_Import(file_name: str = \"main\"):", "dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower, #", "print(\"\\n\") break def Command_Studenti(force_fresh: bool = False): print(\"Získávám studenty...\") try: studenti = api.get_students(", "return x return patched bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify`", "is not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username,", "se navázat zabezpečené připojení k serveru. Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\",", "if first: first = False print(\"Poslední známky:\") note = znamka.note1.strip() or znamka.note2.strip() rich_print(", "shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se načtou a", "nyní proběhne znovu...\" ) return Init() else: return ask_import() except KeyboardInterrupt: partial_init_mode() return", "requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False", "objects)) return c.print(*objects, sep=sep, end=end, **kwargs) def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat,", "Test4(): print(\"Tento test již není podporován... Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"),", "True) def Test5(): print(\"Tento test již není podporován... 
Sadge\") return # homeworks =", "progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id", "= api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length", "již hotové úkoly?\") # cls() # for homework in homeworks: # if not", "není tento argument přítomen, program se zeptá za běhu\", nargs=\"?\", default=None, ) parser.add_argument(", "help=\"Přihlašovací jméno; Pokud není tento argument přítomen, program se zeptá za běhu\", dest=\"username\",", "in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh: schuzky = fresh() else: schuzky", "import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\", help=\"Zapne", "webbrowser.open(obj.join_url) elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(meeting_key_handler)) # elif", "inspect import json import logging import logging.config import os import threading import time", "verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, )", "schůzky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) )", "proveden import dat (z hlavního souboru)\", action=\"store_true\", dest=\"no_import\", default=None, ) parser.add_argument( \"-v\", \"--verbose\",", "bakalariapi.Browser[args.browser.upper()], 
args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium) successful_init = False if", "inpt = input() else: inpt = input(message) if len(inpt) == 0: if default", "1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Znamky(force_fresh: bool = False): print(\"Získávám známky...\")", "hodnoty filtrujeme, tak pokud i po filtrování je \"disable_config\" # v \"parsed\" tak", "bool: message = f\"{text} Ano/Ne{'' if default is None else (' (Ano)' if", "None = None, completed: float | None = None, advance: float | None", "k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\\n\" f\"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]'", "údaje jsou správné\", color=\"green\") print(\"Nastavuji...\") try: with warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože", "type '{type(obj)}' to show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] |", "else: raise Exception(f\"Undefined type '{type(obj)}' to show\") async def keyhandler( handler: Callable[[KeyPress, Callable[[],", "Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\":", "os.path.exists(config_path): with open(config_path, \"r\") as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\")", ") if parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře", "else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try: shell_instance.start_loop()", "5 - NOSET if args.verbose != 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\",", "force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(znamky) print(f\"Známky získány", 
"default is None: continue return default if inpt.isdecimal(): return int(inpt) print(\"Špatná hodnota\") def", "= False print(\"Úkoly:\") ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. %m.')} - {ukol.content}\"", "elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta color =", "typy! Old: '{typ_old}'; New: '{typ_new}'\") typ_mismatch += 1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len", "znamka.confirmed) else \"\" ) ) with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands)", "\" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime =", "f.write(datetime.now().isoformat()) if len(args.commands) != 0: if successful_init: print(\"Vykonávám zadané příkazy...\") for command in", "microsecond=0) today_aware = ( datetime.now() .astimezone() .replace(hour=0, minute=0, second=0, microsecond=0) ) first =", "úkol jako nehotový\", \"Z - Zobrazí HTML úkolu\", ] ) def homework_key_handler(key_press: KeyPress,", "Zpráva {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break", "for x in bakalariapi.Browser], type=str.lower, # => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\",", "instance automaticky inicializována\", action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga", "+= 1 input( f\"Pro pokračování stiskni klávasu... 
(Již zobrazeno {offset} výsledků z {length})\"", "not None} # Jelikož hodnoty filtrujeme, tak pokud i po filtrování je \"disable_config\"", "\"p\": print(\"Potvrzuji zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None))", "pro poslední řádku je: ``` await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt", "ukol in ukoly: if ukol.done: hotove += 1 else: nehotove += 1 if", "else: inpt = input(message) if len(inpt) == 0: if default is None: continue", "funkce nemusejí fungovat správně! ***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int | None", ") parser.add_argument( \"-c\", \"--command\", help=\"Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze", "# prompt=\"[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]\", prompt=\"BakalariAPI Shell>\", allow_python_exec=True, python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True,", "task.update(advance=1) return output if force_fresh: zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if", "obj.is_before_start delta = obj.start_time_delta color = \"\" # Delta totiž může být očividně", "šel spustit také přímo ze zdrojové složky # Pokud se `bakalarishell` spustí jako", "(typ_mismatch, id_mismatch, id_len_mismatch) def Test2(): print(\"Získávám IDčka online schůzek...\") IDs = api._parse( bakalariapi.modules.meetings.getter_meetings_ids(", "\"--slow\", help=\"Pokud je tato flaga přítomna, úkoly budou získány v 'pomalém módu'\", action=\"store_false\",", "to, aby `bakalarishell` šel spustit také přímo ze zdrojové složky # Pokud se", ").json()[\"data\"][\"remainingTime\"] ) if last < current: print(\"\\n\") break last = current time.sleep(1) print(", 
"bound = inspect.signature(f).bind(*args, **kwargs) bound.apply_defaults() login = bound.arguments[\"login\"] bound.arguments[\"login\"] = False x =", "patched(*args, **kwargs): # `cast()` protože jsem zatím nepřišel na způsob, jak dostat hint", "and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first: first = False print(\"Poslední známky:\")", "str) -> str: return os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool, mode: str", "(kde \"bakalarishell\" # je složka), tak relativní `import` selže (\"ImportError: attempted relative import", "error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = [] try: with Progress() as progress: task = RichTask(progress, progress.add_task(\"Získávání", "rich.syntax import Syntax from rich.traceback import install as tb_install from urllib3.exceptions import InsecureRequestWarning", "except KeyboardInterrupt: return offset = 0 cls() while offset < length: try: for", "schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as", "argparser=parser, short_help=\"Importuje data z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede", "subparsers.add_parser( \"show\", help=\"Zobrazí uloženou konfiguraci\", ) subparsers.add_parser( \"save\", help=\"Uloží současnou konfiguraci\", ) subparsers.add_parser(", "= None username: str | None = None password: str | None =", "if os.path.exists(dirname): webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY", "bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti získáni, počet studentů", "in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: 
api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku {ID.ID}", "f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3():", "os.remove(config_path) print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace není uložená\") elif", "api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa serveru\", color=\"red\") partial_init_mode() return False", "KeyboardInterrupt: return offset = 0 cls() while offset < length: try: for _", "def main(): global api global args def load_args_from_config() -> dict | None: global", "běhu\", dest=\"password\", default=None, ) parser.add_argument( \"-b\", \"--browser\", choices=[x.name.lower() for x in bakalariapi.Browser], type=str.lower,", "má smysl pouze pokud parametr `done_on_enter` je `True`. done_on_enter: Pokud True, tak se", "* 100 print(\"Konečná ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) ################################################## ##### MAIN #####", "in vars(parser.parse_args()).items() if v is not None} # Jelikož hodnoty filtrujeme, tak pokud", "asyncio.run(keyhandler(None)) elif isinstance(obj, bakalariapi.Meeting): rich_print(obj.format(True)) print(\"\\n\\n\") is_before = obj.is_before_start delta = obj.start_time_delta color", "KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží a přihlašovací", "KeyboardInterrupt: print(\"\\n\") break def Command_Ukoly(fast: bool = False, force_fresh: bool = False): print(\"Načítání", "def update( self, total: float | None = None, completed: float | None", "# homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove = AnoNeDialog(\"Chte zobrazit již", "(\"H - Označí úkol jako hotový\", \"\" if obj.done else \"green\"), \"N -", "show(znamka, f\"*** Známka {count} z {length} ***\") count += 1 
cls() except KeyboardInterrupt:", "{ID} nenalezen\", color=\"red\") def Test0(): print(\"Spouštím testování...\") with api.session_manager.get_session_or_create( bakalariapi.sessions.RequestsSession ) as session:", "False): print(\"Získávám studenty...\") try: studenti = api.get_students( bakalariapi.GetMode.FRESH if force_fresh else bakalariapi.GetMode.CACHED_OR_FRESH )", "načíst hodnotu z configu (protože hodnota z configu # se přepíše hodnotou \"None\"", "nemusí řešit formátování při \"config show\") json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f:", "na způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u) session = cast(bakalariapi.sessions.RequestsSession,", "pořádku. # Pozn.: Pokud někdo dumá nad tím, proč zde tedy není jen", "= __import__(__name__) t = f\"Test{ID}\" if hasattr(m, t): rich_print(f\"Zahajuji test {ID}\") try: o", "f.truncate() print(f\"JSON data vygenerována a zapsána do souboru '{file_name}'\") def Command_Import(file_name: str =", "task.start() for unresolved_id in unresolved: api._resolve(unresolved_id) task.update(advance=1) @dataclass class Task: description: str function:", "na bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá za", "in filter( lambda x: today_aware < x.start_time and x.start_time < today_aware + timedelta(2),", "i z editoru (i když k tomu nejspíše nikdy nedojde) # (a navíc", "vícekrát pro větší 'ukecanost' (max 5)\", action=\"count\", default=None, ) parser.add_argument( \"-d\", \"--disable-config\", help=\"Soubor", "2 - Info; Pouze BakalářiAPI # 3 - Debug; Pouze BakalářiAPI # 4", "načíst ze serveru\") zpravy = fresh() length = len(zpravy) if length == 0:", "print(\"Tento test již není podporován... 
Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID", "f, indent=4) def disable_ssl(): def patch(f: Callable): def patched(*args, **kwargs): # `cast()` protože", "heslo\", color=\"yellow\" ) args.password = \"\" api.password = args.password try: rich_print( f\"Kontrola stavu", "except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return", "le = len(error) print( f\"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost:", "shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", ) ) parser = shell.ShellArgumentParser() parser.add_argument(", "bylo {count_invalid} z celkových {count_total}\") probrallity = (count_total - count_invalid) / count_total *", "dataclass, field from datetime import datetime, timedelta from typing import IO, TYPE_CHECKING, Any,", "dané funkce. Args: handler: Funkce do které se passují zaznamenané klávesy. Bere 2", "== \"n\": obj.mark_as_done(api, False) print(\"Úkol označen jako nehotový\") elif key == \"z\": c", "print(\"==============================\") print(f\"Nepodařil se se pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\")", "print(\"Pravděpodobnost úspěšnosti je %.2f%%\" % probrallity) print(\"==============================\") time.sleep(5) except KeyboardInterrupt: print(\"==============================\") print(f\"Nepodařených pokusů", "to rewrite previous text... session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND) ) current = float( session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"]", "rich_print( f\"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. 
%Y')}\" ) first = True for", "##### MAIN ##### ################################################## def main(): global api global args def load_args_from_config() ->", "# if key_press.key == Keys.Escape: # raise SystemExit elif not mask_keyboard_interrupt and key_press.key", "partial_init_mode() return False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\",", "= argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... Actually je to web", "přítomna, úkoly budou získány v 'pomalém módu'\", action=\"store_false\", dest=\"fast\", default=True, ) shell_instance.add_command( shell.Command(", ") shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na práci s uloženou konfigurací\", spread_arguments=False,", "= input(\"URL adresa serveru: \") api.server_info.url = args.url except KeyboardInterrupt: rich_print(\"\\nNebyla zadána adresa", "with get_io_file(TIME_FILE, False) as f: lasttime = datetime.fromisoformat(f.read()) except FileNotFoundError: pass if args.auto_run:", "'d')+' stará verze)[/bright_black]'}\\n\" f\"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is", "False if not args.no_init: successful_init = Init() if not args.no_import: try: with get_io_file(\"main\",", "mode, encoding=\"utf-8\") def save_config(): with get_io_file(CONFIG_FILE, True) as f: # Indent, protože chci,", "[cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode()", "{la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test", "HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if key", "stěžovat 
warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE ##### ################################################## def Init() -> bool: def", "\") api.username = args.username except KeyboardInterrupt: rich_print(\"\\nNebylo zadáno přihlašovací jméno\", color=\"red\") partial_init_mode() return", "líp :)\", ) if parser.prog == \"\": parser.prog = \"bakalarishell\" parser.add_argument( \"url\", help=\"URL", "tato flaga přítomna, vynutí se získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", )", "with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data nebyla načtena, jelikož", ". import shell except ImportError: import shell tb_install(show_locals=True) cls = shell.cls api: bakalariapi.BakalariAPI", "Schůzka {count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break", "Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce. Args: handler: Funkce", "new.data[typ_new].keys(), new.data[typ_new].values(), ): if id_old != id_new: print( f\"Neshodující se ID! Old: '{id_old}';", "bakalariapi.sessions.RequestsSession.__init__ = patch( bakalariapi.sessions.RequestsSession.__init__ ) # Když nastavíme `verify` na `False` (v `requests`", "command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands, ) parser_fresh = shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument(", "\", end: str = \"\\n\", file: IO[str] | None = None, flush: bool", "bool, mode: str = \"r+\") -> IO: \"\"\"Vrátí file handler na daný soubor", "manuálně warnings.simplefilter(\"ignore\") api.init() except KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu serveru, ale", "již není podporován... 
Sadge\") return # return API.MarkHomeworkAsDone(input(\"ID Úkolu: \"), input(\"ID Studenta: \"),", "for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first: first = False", "def partial_init_notice(): rich_print( 'Tuto akci nelze vykonat, jelikož shell se nachází v omezeném", "konfigurace je {s.st_size}B\" ) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"open\":", "else bakalariapi.GetMode.CACHED_OR_FRESH ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return length = len(studenti) print(f\"Studenti získáni, počet", "api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***\", highlight=False,", "else: print(\"Žádná konfigurace není uložená\") elif cmd == \"open\": dirname = os.path.dirname(config_path) #", "0: if default is None: continue return default input_letter = inpt[0].lower() if input_letter", "\"\", default: int | None = None): print(text, \"\" if default is None", "Všechny funkce nemusejí fungovat správně! ***\", highlight=False, color=\"yellow\", ) def Command_Komens(limit: int |", "passují zaznamenané klávesy. Bere 2 argumenty: key_press: Zaznamenaný stisk klávesy. 
done: Funkce, která", "__init__(self, progress: Progress, task_id: TaskID) -> None: self.progress = progress self.task_id = task_id", "count = 1 for ukol in ukoly: try: if not zobraz_hotove and ukol.done:", "inpt = create_input() done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in", "subparsers.add_parser( \"open\", help=\"Otevře konfigurační složku\", ) shell_instance.add_command( shell.Command( \"config\", Command_Config, argparser=parser, short_help=\"Příkaz na", "+= 1 continue old_id_len = len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len != new_id_len:", "self.progress = progress self.task_id = task_id def start(self): self.progress.start_task(self.task_id) def update( self, total:", "{ID}\") try: o = getattr(m, t)() rich_print( f\"Test {ID} skončil\" + (\"\" if", "key_press.key == \"q\": done() asyncio.run(keyhandler(handler)) ``` Nebo, pokud máme asynchoní funkci, lepší řešení", "přečtení zprávy\", \"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if", "isinstance(obj, bakalariapi.Student): # pass elif isinstance(obj, bakalariapi.Homework): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys( [ (\"H -", "import dataclass, field from datetime import datetime, timedelta from typing import IO, TYPE_CHECKING,", "bakalariapi.utils import cs_timedelta, parseHTML from prompt_toolkit.input import create_input from prompt_toolkit.key_binding import KeyPress from", "# Delta totiž může být očividně i negativní if not is_before and delta", "enter_pokracovani=True): output = [\"Enter - Pokračování\"] if enter_pokracovani else [] for key in", "default=None, ) parser.add_argument( \"-e\", \"--executablePath\", help=\"Cesta ke spustitelnému webdriveru pro prohlížeč, který je", "0 test: int | None = None auto_run: bool = False no_init: bool", "= None, flush: bool = False, color: str | None = None, **kwargs,", "KeyboardInterrupt: partial_init_mode() 
return False except requests.exceptions.RequestException: return ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně", "return offset = 0 cls() while offset < length: try: for _ in", "načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le = len(error) print( f\"Úspěšné", "označen jako nehotový\") elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler))", "cls() count = 1 for schuzka in schuzky: try: show(schuzka, f\"*** Schůzka {count}", "elif is_before and delta <= timedelta(minutes=5): color = \"yellow\" elif is_before and delta", "in znamky: try: show(znamka, f\"*** Známka {count} z {length} ***\") count += 1", "progress: task = RichTask( progress, progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved = api._parse(", "\"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv, které se načtou a tím i", "by tímto neměli být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return True", "f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d.", "await keyhandler(handler) ``` \"\"\" evnt = asyncio.Event() inpt = create_input() done = lambda:", "inpt = input(message) if len(inpt) == 0: if default is None: continue return", "else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key == \"p\": print(\"Potvrzuji zprávu...\")", "cls() count = 1 for znamka in znamky: try: show(znamka, f\"*** Známka {count}", "return [] output: list[bakalariapi.Komens] = [] with Progress() as progress: task = RichTask(", "zprávy v Lootingu, zkouším načíst ze serveru\") zpravy = fresh() length = len(zpravy)", ") ) parser = shell.ShellArgumentParser() 
parser.add_argument( \"file_name\", nargs=\"?\", help=\"ID/jméno importu\", default=\"main\", metavar=\"ID\", )", "help=\"Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát\", action=\"append\", dest=\"commands\",", "0: if successful_init: print(\"Vykonávám zadané příkazy...\") for command in args.commands: print(command) shell_instance.proc_string(command) else:", "`False` (v `requests` modulu), `urllib3` si začne stěžovat warnings.filterwarnings(\"ignore\", category=InsecureRequestWarning) ################################################## ##### PŘÍKAZO-FUNKCE", "komens in filter( lambda x: x.grade == \"?\", api.looting.get(bakalariapi.Grade) ): if first: first", "že se vše převádí na string, ale zatím to problém není, tak to", ") task.update(total=length, completed=length) def task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api,", "tato flaga přítomna, neprovede se odlášení sessionů a aplikace se tedy rychleji ukončí\",", ">= timedelta(hours=-1): color = \"red\" elif is_before and delta <= timedelta(minutes=5): color =", "int | None = None, force_fresh: bool = False): def fresh() -> list[bakalariapi.Komens]:", "schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) == 0: print(\"Žádné schůzky v Lootingu, zkouším načíst", "logging.getLogger(name) for name in logging.root.manager.loggerDict ]: if logger.name.startswith(\"bakalariapi\"): continue logger.propagate = False #", "False print(\"Dnešní a zítřejší schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. 
%Y')} - {'[bright_black]Neznámý[/bright_black]' if", "nad tím, proč zde tedy není jen druhá možnost, tak to je #", "is not None: print(title) if isinstance(obj, bakalariapi.Komens): rich_print(obj.format(True)) print(\"\\n\\n\") print_keys([(\"P - Potrvrdí přečtení", "Keys from rich.console import Console from rich.logging import RichHandler from rich.progress import BarColumn,", "bool: def partial_init_mode(): rich_print( \"\\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\\nPro", "fast_mode=False, unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove =", "return parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... Actually", "není tento argument přítomen, program se zeptá za běhu\", dest=\"password\", default=None, ) parser.add_argument(", "bool = False): def fresh(): if api.is_partial_init: partial_init_notice() return [] output = []", "os.path.join(dirs.user_data_dir, file) def get_io_file(file: str, create_file: bool, mode: str = \"r+\") -> IO:", "= 1 for schuzka in schuzky: try: show(schuzka, f\"*** Schůzka {count} z {length}", ") return True print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není", "import traceback import warnings import webbrowser from dataclasses import dataclass, field from datetime", "známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. 
%Y')}\" + (\"\" if note == \"\"", "# => case-insensitive help=\"Specifikuje WebDriver prohlížeče, který použít\", default=None, ) parser.add_argument( \"-e\", \"--executablePath\",", "str | None = None verbose: int = 0 test: int | None", "žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove}, nehotové {nehotove})\") zobraz_hotove = fast", "is None: try: args.url = input(\"URL adresa serveru: \") api.server_info.url = args.url except", "True: # ano/true/yes/1 / ne/false/no/0 if color is not None: rich_print(message, end=\"\", color=color)", "int(inpt) print(\"Špatná hodnota\") def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True): output = [\"Enter", "/ ne/false/no/0 if color is not None: rich_print(message, end=\"\", color=color) inpt = input()", "= input(message) if len(inpt) == 0: if default is None: continue return default", "%m. %Y')}\" + ( \" [yellow](nepotvrzená)[/yellow]\" if (znamka.need_confirm and not znamka.confirmed) else \"\"", ") ) parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud je tato flaga přítomna,", "default=None, ) parser.add_argument( \"-a\", \"--auto-run\", help=\"Pokud je tato flaga přítomna, spustí se automatické", ") shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", ) ) parser =", "color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]):", "_ in range(count): if offset >= length: break print(studenti[offset].format()) offset += 1 input(", "end=\"\", color=color) inpt = input() else: inpt = input(message) if len(inpt) == 0:", "webbrowser.open(os.path.realpath(dirname)) else: print(\"Nelze otevřít konfigurační složku, jelikož neexistuje\") ################################################## ##### TESTY ##### ##################################################", "můžete zkusit příkaz \"init\".', color=\"yellow\", ) def dialog_ano_ne( 
text: str = \"\", default:", "pokusů je {count_invalid} z {count_total}\") probrallity = (count_total - count_invalid) / count_total *", "as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is not None: RunTest(args.test) prepare_shell()", "také přímo ze zdrojové složky # Pokud se `bakalarishell` spustí jako modul (=", "'{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})\"", "KeyboardInterrupt: rich_print( \"Nebyly získány informace o stavu serveru, ale žádné funkce by tímto", "({length}), zobrazuji...\") cls() count = 1 for znamka in znamky: try: show(znamka, f\"***", "key_press.key.lower() if key == \"o\": webbrowser.open(obj.join_url) elif key == \"z\": c = Console()", "= fresh() length = len(schuzky) if length == 0: print(\"Nebyly nalezeny žádné aktualní", "None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium)", "serveru\") zpravy = fresh() length = len(zpravy) if length == 0: print(\"Nebyly nalezeny", "(new): {len(api.looting.data)}\") print(\"Porovnávání zahájeno...\") for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()): if typ_old !=", "Command_Znamky, short_help=\"Zobrazí známky\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) shell_instance.add_command( shell.Command( \"schuzky\", Command_Schuzky, short_help=\"Zobrazí (nadcházející) schůzky\",", "%m.')} - {ukol.content}\" ) first = True for znamka in filter( lambda x:", "jsou neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try: if dialog_ano_ne(", "today - timedelta(5)) < x.date1 and x.grade != \"?\", api.looting.get(bakalariapi.Grade), ): if first:", "parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu 
BakalářiAPI\", epilog=\"Ano, ano, ano... Actually je", "parsed = json.load(f) return parsed parser = argparse.ArgumentParser( description=\"Shell integrující funkcionalitu BakalářiAPI\", epilog=\"Ano,", "rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import Syntax from rich.traceback import", "def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"): print(\"Generace", "f\"Pro pokračování stiskni klávasu... (Již zobrazeno {offset} výsledků z {length})\" ) cls() except", "progress: task = RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\")", "args.browser is not None: selenium = bakalariapi.SeleniumHandler( bakalariapi.Browser[args.browser.upper()], args.executable_path, ) api = bakalariapi.BakalariAPI(args.url,", "username: str | None = None password: str | None = None browser:", "get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is not None:", "print(\"Konfigurace byla vymazána\") else: print(\"Nic se nevykonalo, jelikož konfigurace není uložená\") elif cmd", "api, from_date=None if lasttime is None else lasttime - timedelta(5), ) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved))", "Any, sep: str = \" \", end: str = \"\\n\", file: IO[str] |", "in parsed): from_config = load_args_from_config() if from_config is not None: parsed = from_config", "ukol._sort_by_date rich_print( f\"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first =", "self, total: float | None = None, completed: float | None = None,", "{hotove}, nehotové {nehotove})\") zobraz_hotove = fast or dialog_ano_ne(\"Chte zobrazit již hotové úkoly?\") count", "import argparse import asyncio import getpass import inspect import json import logging import", "stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not", "\"yellow\" elif is_before and delta <= timedelta(minutes=30): color = \"green\" print_keys( [(\"O -", "progress.add_task(\"Získávání schůzek\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id", "highlight=False, color=\"yellow\", ) def Command_Komens(limit: int | None = None, force_fresh: bool =", "(' (Ano)' if default else ' (Ne)')}: \" while True: # ano/true/yes/1 /", "\"bakalarishell\" parser.add_argument( \"url\", help=\"URL na bakaláře (př. 
https://bakalari.skola.cz); Pokud není tento argument přítomen,", "dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet zpráv,", "\"\\n\", file: IO[str] | None = None, flush: bool = False, color: str", "x = f(*bound.args, **bound.kwargs) session.session.verify = False if login: session.login() return x return", "= namespace[\"cmd\"] config_path = get_io_filepath(CONFIG_FILE) if cmd == \"show\": if os.path.exists(config_path): with open(config_path,", "problém, že se vše převádí na string, ale zatím to problém není, tak", "kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) if \"exit\" not in args.commands and (not args.no_import", "\"\" if obj.confirmed else \"green\")]) def komens_key_handler(key_press: KeyPress, done: Callable): if key_press.key ==", "zbývalo \" + str(last) + \" (+ max 1s) do konce a bylo", "first = True for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)): if first:", "= 0 nehotove = 0 for ukol in ukoly: if ukol.done: hotove +=", "úkol jako hotový\", \"\" if obj.done else \"green\"), \"N - Označí úkol jako", "nehotove == 0: print(\"Nebyly nalezeny žádné aktualní úkoly\") return print(f\"Úkoly načteny (hotové {hotove},", "return True if input_letter in \"nf0\": return False def dialog_cislo(text: str = \"\",", "action=\"store_true\", dest=\"no_init\", default=None, ) parser.add_argument( \"--no-import\", help=\"Pokud je tato flaga přítomna, nebude proveden", "RichTask(progress, progress.add_task(\"Získávání schůzek\", total=la)) for ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except", "- le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%\" ) def Test3(): print(\"Tento test již", "počet záznamů pro typ {typ_old}! 
Old: {old_id_len}; New: {new_id_len}\" ) id_len_mismatch += 1", "break def Command_Konec(nice: bool = True): shell_instance.stop_loop() api.kill(nice) def Command_Export(file_name: str = \"main\"):", "shell.ShellArgumentParser(add_help=False) parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí se získání dat", "{count} z {length} ***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def", "logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None = None if args.browser is not", "protože chci, aby to šlo přehledně upravit i z editoru (i když k", "unfinished_only=False, only_first_page=False, ) except bakalariapi.exceptions.PartialInitError: partial_init_notice() return hotove = 0 nehotove = 0", "se získání dat ze serveru\", default=False, action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument(", "test již není podporován... Sadge\") return # return API.GetHomeworksIDs() def Test4(): print(\"Tento test", "[\"Enter - Pokračování\"] if enter_pokracovani else [] for key in keys: if isinstance(key,", "nehotový\") elif key == \"z\": c = Console() c.print(Syntax(str(parseHTML(obj.content).prettify()), \"html\")) asyncio.run(keyhandler(homework_key_handler)) else: raise", "přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje", "try: show(znamka, f\"*** Známka {count} z {length} ***\") count += 1 cls() except", "def keyhandler( handler: Callable[[KeyPress, Callable[[], None]], None] | None, *, done_on_enter: bool =", "není podporován... 
Sadge\") return # homeworks = API.GetHomeworks() # print(\"Úkoly načteny...\") # zobrazHotove", "True: count_total += 1 output = api.get_homeworks( bakalariapi.GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False, ) if", "vícekrát\", action=\"append\", dest=\"commands\", default=None, ) # Všechny argumenty pro argparse MUSÍ mít \"default=None\",", "Zobrazí HTML pozvánky\"] ) def meeting_key_handler(key_press: KeyPress, done: Callable): key = key_press.key.lower() if", "hotové úkoly?\") count = 1 for ukol in ukoly: try: if not zobraz_hotove", "Verbose: # 0 - Nic # 1 - Warning; Pouze BakalářiAPI # 2", "None: RunTest(args.test) prepare_shell() # Chceme `main()` locals, ne `prepare_shell()` locals shell_instance.PYTHON_EXEC_LOCALS = locals()", "help=\"Pokud je tato flaga přítomna, neprovede se odlášení sessionů a aplikace se tedy", "True selenium: bakalariapi.SeleniumHandler | None = None if args.browser is not None: selenium", "uložená data?\", True, \"yellow\", ): Command_Import() else: partial_init_mode() else: rich_print( \"Server není dostupný;", "stavu serveru, ale žádné funkce by tímto neměli být ovlivněny\", color=\"yellow\", ) return", "= None, refresh: bool = False, **fields, ): self.progress.update( self.task_id, total=total, completed=completed, advance=advance,", "již není podporován... 
Sadge\") return # return API.GetHomeworksIDs() def Test4(): print(\"Tento test již", "data z daného souboru\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command(\"init\", Init, short_help=\"Provede (opětovnou) inicializaci\")", "if not inpt: if default is None: continue return default if inpt.isdecimal(): return", "starými daty\", color=\"yellow\", ) partial_init_mode() except KeyboardInterrupt: partial_init_mode() return False if args.url is", "f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version is None) and not api.is_version_supported(): rich_print( \"***", "rich.logging import RichHandler from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn from rich.syntax import", "BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime:", "formátování při \"config show\") json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f: Callable): def", "output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1) return output if force_fresh: zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED)", ") shell_instance.add_command( shell.Command( \"studenti\", Command_Studenti, short_help=\"Zobrazí studenty\", argparser=shell.ShellArgumentParser(parents=[parser_fresh]), ) ) parser = shell.ShellArgumentParser()", "as f: rich_print(Syntax(f.read(), \"json\")) else: print(\"Žádná konfigurace není uložená\") elif cmd == \"save\":", "!= 0: logging.basicConfig( level=[ None, \"WARNING\", \"INFO\", \"DEBUG\", \"INFO\", \"NOTSET\", ][args.verbose], datefmt=\"[%X]\", handlers=[RichHandler()],", "else \"\" ) ) with get_io_file(TIME_FILE, True) as f: f.write(datetime.now().isoformat()) if len(args.commands) !=", "neexistuje\", color=\"yellow\", ) else: print(f\"Data ze souboru '{file_name}' byla načtena\") 
def Command_Config(namespace: dict[str,", "get_io_filepath(file) if not os.path.exists(path): if not create_file: raise FileNotFoundError() os.makedirs(os.path.dirname(path), exist_ok=True) with open(path,", "***\", ) count += 1 except KeyboardInterrupt: print(\"\\n\") break def Command_Konec(nice: bool =", "# Jelikož hodnoty filtrujeme, tak pokud i po filtrování je \"disable_config\" # v", "api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception()", "not args.no_import: try: with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if", "ask_import() except KeyboardInterrupt: rich_print(\"Inicializace byla předčasně ukončena\", color=\"yellow\") partial_init_mode() return False rich_print(\"Server běží", "str] | str], enter_pokracovani=True): output = [\"Enter - Pokračování\"] if enter_pokracovani else []", "if \"exit\" not in args.commands and (not args.no_import or args.auto_run): print() today =", "save_config() print(\"Konfigurace uložena\") elif cmd == \"remove\": if os.path.exists(config_path): os.remove(config_path) print(\"Konfigurace byla vymazána\")", "# 4 - Info # 5 - NOSET if args.verbose != 0: logging.basicConfig(", "new.data[typ_new].values(), ): if id_old != id_new: print( f\"Neshodující se ID! 
Old: '{id_old}'; New:", "parser_fresh.add_argument( \"-f\", \"--fresh\", help=\"Pokud je tato flaga přítomna, vynutí se získání dat ze", "data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\") # Porovnávání typ_mismatch =", "tento argument přítomen, program se zeptá za běhu\", dest=\"username\", nargs=\"?\", default=None, ) parser.add_argument(", "color = \"\" # Delta totiž může být očividně i negativní if not", "[magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}\" + (\"\" if note ==", "ze souboru '{file_name}' byla načtena\") def Command_Config(namespace: dict[str, Any]): cmd = namespace[\"cmd\"] config_path", "rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: # rich.get_console().print_exception() try:", "################################################## ##### MAIN ##### ################################################## def main(): global api global args def load_args_from_config()", "output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return output if force_fresh: schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED)", "short_help=\"Zobrazí informace o serveru\", ) ) parser = shell.ShellArgumentParser() parser.add_argument( \"-f\", \"--force\", help=\"Pokud", "is None else f'[cyan]{api.server_info.version}[/cyan]'}\\n\" f\"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is", "funkce v parametru handler nevolá. mask_keyboard_interrupt: Pokud `True`, tak `KeyboardInterrupt` bude potlačen. 
Pokud", "ID in IDs: task.update(description=f\"Schůzka {ID.ID}\") try: api._resolve(ID) except bakalariapi.exceptions.BakalariQuerrySuccessError as e: progress.log(f\"Online schůzku", "(new): {type(id_new)})\" ) id_mismatch += 1 print( f\"Porovnávání dokončeno:\\nChyb u typů:\\t{typ_mismatch}\\nChyb u ID:\\t{id_mismatch}\"", "Command_Konec(False) def prepare_shell(): global shell_instance predefined_commands = [x for x in shell.ShellPredefinedCommands] predefined_commands.remove(shell.ShellPredefinedCommands.EXIT)", "pomocí příkazu 'init'.\", color=\"yellow\", ) def ask_import() -> bool: try: if args.no_import: if", "output if force_fresh: schuzky = fresh() else: schuzky = api.get_meetings(bakalariapi.GetMode.CACHED) if len(schuzky) ==", "action=\"store_true\", dest=\"force_fresh\", ) parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"limit\", type=int, nargs=\"?\", default=None, help=\"Limituje počet", "shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True ) CONFIG_FILE = \"config.json\" TIME_FILE =", "force_fresh: zpravy = fresh() else: zpravy = api.get_komens(bakalariapi.GetMode.CACHED) if len(zpravy) == 0: print(\"Žádné", "= True tasks: list[Task] = [ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\",", "kopie dat skrze export/import...\") data = api.looting.export_data() new = bakalariapi.looting.Looting() new.import_data(data) print(\"Kopie vytvořena\")", "and not api.is_version_supported(): rich_print( \"*** Jiná verze Bakalářů! 
Všechny funkce nemusejí fungovat správně!", "***\") count += 1 cls() except KeyboardInterrupt: print(\"\\n\") break def Command_Schuzky(force_fresh: bool =", "`import`), tak vše proběhne v pořádku # Pokud se ale spustí přes \"python", "in filter( lambda x: (x.need_confirm and not x.confirmed) or min(lasttime, today - timedelta(5))", "inpt.raw_mode(): with inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir,", "v uživatelské (data) složce.\"\"\" path = get_io_filepath(file) if not os.path.exists(path): if not create_file:", "Callable[[], None]], None] | None, *, done_on_enter: bool = True, mask_keyboard_interrupt: bool =", "levelu %s (%s)\", args.verbose, logging.getLevelName(logging.root.level), ) if args.verbose < 4: for logger in", "args = Args(**parsed) # Verbose: # 0 - Nic # 1 - Warning;", "tasks: list[Task] = [ Task(\"Získání Komens zpráv\", task_komens, False), Task(\"Získání schůzek\", task_schuzky, False),", "self.progress.start_task(self.task_id) def update( self, total: float | None = None, completed: float |", "řešit formátování při \"config show\") json.dump(args.__dict__, f, indent=4) def disable_ssl(): def patch(f: Callable):", "python_exec_prefix=\" \", python_exec_globals=_globals, python_exec_locals=locals(), predefined_commands=predefined_commands, command_exception_traceback=True, command_exception_traceback_locals=True, command_exception_reraise=False, raise_on_ctrlc=True, end_on_ctrlc=True, dummy_shell=\"exit\" in args.commands,", "disable_config: bool = False commands: list[str] = field(default_factory=list) args: Args class RichTask: def", "current time.sleep(1) print( \"Sezení bylo prodlouženo, když zbývalo \" + str(last) + \"", "úkol ***\") # print(homework.Format()) # print(\"\\n\\n\") # input(\"Pro pokračování stiskni klávasu...\") # cls()", "help=\"ID/jméno exportu\", default=\"main\", metavar=\"ID\", ) shell_instance.add_command( 
shell.Command( \"export\", Command_Export, argparser=parser, short_help=\"Exportuje data z", "= create_input() done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in keys:", "str | None = None ) -> bool: message = f\"{text} Ano/Ne{'' if", "if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError:", ") unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit] task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0]) task.update(advance=1)", "None auto_run: bool = False no_init: bool = False no_import: bool = False", "ukončí záznam kláves. Pokud je `None`, nic se nevolá. Hodnota `None` má smysl", "úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o serveru\", )", "input() else: inpt = input(message) if len(inpt) == 0: if default is None:", "které následně passuje do dané funkce. Args: handler: Funkce do které se passují", "warnings.catch_warnings(): # Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init()", "funkcionalitu BakalářiAPI\", epilog=\"Ano, ano, ano... 
Actually je to web scraper, ale API zní", "rich_print( f\"Bakalarishell připraven - verze BakalářiAPI je \" + f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in", ") if not (api.server_info.version is None) and not api.is_version_supported(): rich_print( \"*** Jiná verze", "= \"main\"): try: with get_io_file(file_name, False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: rich_print( f\"Data", "# logging.getLogger(\"bakalariapi\").propagate = True selenium: bakalariapi.SeleniumHandler | None = None if args.browser is", "print(\"Nastaveno:\") ServerInfo() return True def ServerInfo(): rich_print( f\"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if", "shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", ) try:", "inpt.attach(lambda: key_handler_proc(inpt.read_keys())): await evnt.wait() def get_io_filepath(file: str) -> str: return os.path.join(dirs.user_data_dir, file) def", "else: return ask_import() except KeyboardInterrupt: partial_init_mode() return False except requests.exceptions.RequestException: return ask_import() except", "ravděpodobnost úspěšnosti je %.2f%%\" % probrallity) ################################################## ##### MAIN ##### ################################################## def main():", "print(command) shell_instance.proc_string(command) else: rich_print( \"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci\", color=\"yellow\", )", "default is None: continue return default input_letter = inpt[0].lower() if input_letter in \"aty1\":", "{ukol.submission_date.strftime('%d. 
%m.')} - {ukol.content}\" ) first = True for znamka in filter( lambda", "parser = shell.ShellArgumentParser(parents=[parser_fresh]) parser.add_argument( \"-s\", \"--slow\", help=\"Pokud je tato flaga přítomna, úkoly budou", "pro uživatele [cyan]{api.username}[/cyan]...\", highlight=False, ) try: if not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\",", "argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\", ServerInfo, short_help=\"Zobrazí informace o", "pokus číslo {count_total}\") print(f\"Nepodařených pokusů je {count_invalid} z {count_total}\") probrallity = (count_total -", "task_id def start(self): self.progress.start_task(self.task_id) def update( self, total: float | None = None,", "with get_io_file(\"main\", False) as f: api.looting.import_data(json.loads(f.read())) except FileNotFoundError: pass if args.test is not", "first = True for znamka in filter( lambda x: min(lasttime, today - timedelta(5))", "url: str | None = None username: str | None = None password:", "zprávu...\") obj.confirm(api) print(\"Zpráva potvrzena\") asyncio.run(keyhandler(komens_key_handler)) elif isinstance(obj, bakalariapi.Grade): rich_print(obj.format(True)) print(\"\\n\\n\") asyncio.run(keyhandler(None)) elif isinstance(obj,", "tímto neměli být ovlivněny\", color=\"yellow\", ) return True print(\"Nastaveno:\") ServerInfo() return True def", "shell_instance.add_command( shell.Command( \"ukoly\", Command_Ukoly, argparser=parser, short_help=\"Zobrazí úkoly\", spread_arguments=True, ) ) shell_instance.add_command( shell.Command( \"server\",", "not api.is_login_valid(): rich_print(\"Přihlašovací údaje jsou neplatné\", color=\"red\") partial_init_mode() return False except requests.exceptions.SSLError: #", "install as tb_install from urllib3.exceptions import InsecureRequestWarning # Takový hack na to, aby", "if isinstance(key, tuple): if key[1] == \"\": output.append(key[0]) else: 
output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\",", "hotový\", \"\" if obj.done else \"green\"), \"N - Označí úkol jako nehotový\", \"Z", "tuple): if key[1] == \"\": output.append(key[0]) else: output.append(f\"[{key[1]}]{key[0]}[/{key[1]}]\") else: output.append(key) rich_print(\", \".join(output)) def", "Chcete pokračovat s nezabezpečeným připojením?\", False, \"yellow\", ): disable_ssl() api.session_manager.kill_all(False) print( \"Deaktivovalo se", "už je v pořádku. # Pozn.: Pokud někdo dumá nad tím, proč zde", "api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash ==", "unresolved = api._parse( bakalariapi.modules.meetings.getter_future_meetings_ids(api) ).get(bakalariapi.UnresolvedID) task.update(total=len(unresolved)) for unresolved_id in unresolved: output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0]) task.update(advance=1) return", "23, 59, 59) ) ).get(bakalariapi.UnresolvedID) la = len(IDs) print(f\"IDčka online schůzek získany ({la})\")", "Task: description: str function: Callable[[bakalariapi.BakalariAPI, RichTask], None] start: bool = True tasks: list[Task]", "= RichTask( progress, progress.add_task(\"Získávání zpráv\", start=False, total=0) ) unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids(api) ).get(bakalariapi.UnresolvedID)[:limit]", "None = None auto_run: bool = False no_init: bool = False no_import: bool", "not inpt: if default is None: continue return default if inpt.isdecimal(): return int(inpt)", "předpokládá se prázdné heslo\", color=\"yellow\" ) args.password = \"\" api.password = args.password try:", "True: last = session.get( api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO) ).json()[\"data\"][\"remainingTime\"] print(\"\\r\", end=\"\") while True: print( \"Současný zbývající", "'{typ_new}'\") typ_mismatch += 1 continue old_id_len = 
len(api.looting.data[typ_old]) new_id_len = len(new.data[typ_new]) if old_id_len", "`False`, `KeyboardInterrupt` bude propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio", "se nepodařilo načíst\") error.append(ID) finally: task.update(advance=1) except KeyboardInterrupt: pass finally: le = len(error)", "(not args.no_import or args.auto_run): print() today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) today_aware =", "default: int | None = None): print(text, \"\" if default is None else", "# Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně warnings.simplefilter(\"ignore\") api.init() except", "k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\\n\", highlight=False, ) if not (api.server_info.version", "= False): def fresh(): if api.is_partial_init: partial_init_notice() return [] output = [] with", "schůzky:\") rich_print( f\"{schuzka.start_time.strftime('%H:%M %d. %m. 
%Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else", "if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\\n\" f\"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash", "from urllib3.exceptions import InsecureRequestWarning # Takový hack na to, aby `bakalarishell` šel spustit", "done = lambda: evnt.set() def key_handler_proc(keys: list[KeyPress]): for key_press in keys: if done_on_enter", "import Console from rich.logging import RichHandler from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn", "import time import traceback import warnings import webbrowser from dataclasses import dataclass, field", "cls = shell.cls api: bakalariapi.BakalariAPI shell_instance: shell.Shell dirs = platformdirs.PlatformDirs( appauthor=\"BakalariAPI\", appname=\"bakalarishell\", roaming=True", "f\"[green_yellow]{bakalariapi.__version__}[/green_yellow]\" if \"dev\" in bakalariapi.__version__ else f\"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]\" ) lasttime: datetime = datetime.max try:", "bude propagován. Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt`", "rich_print(\", \".join(output)) def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None): if title", "task_komens(api: bakalariapi.BakalariAPI, task: RichTask): unresolved = api._parse( bakalariapi.modules.komens.getter_komens_ids( api, from_date=None if lasttime is" ]
[ "from controller.base import * class UI(FlaskView): def index(self): return render_template('index.haml') def get(self, key=None):", "flash=\"\"\" Sorry, but the Lexxer doesn't exist. Please enter only filename suffix like", "raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')]", "return render_template('new.haml') elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element. Redirect", "Sorry, but the Lexxer doesn't exist. Please enter only filename suffix like .rb", "render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide", "creating syntax code stuff. Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)]", "key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return", "return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try:", "controller.base import * class UI(FlaskView): def index(self): return render_template('index.haml') def get(self, key=None): try:", "redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error while creating syntax code stuff. Please", "@route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide')", "a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't exist.", "hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't exist. 
Please enter only", "elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element. Redirect back!\" return", "only filename suffix like .rb or .py \"\"\" return render_template('show.haml', key=ckey[0], flash=flash, code=hcode)", "enter only filename suffix like .rb or .py \"\"\" return render_template('show.haml', key=ckey[0], flash=flash,", "class UI(FlaskView): def index(self): return render_template('index.haml') def get(self, key=None): try: flash=None if key", "try: flash=None if key == 'new': return render_template('new.haml') elif key: return self.__show(key) except", "flash=\"Couldn't find syntax element. Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key):", "def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode =", "'new': return render_template('new.haml') elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element.", "= (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error while", "get(self, key=None): try: flash=None if key == 'new': return render_template('new.haml') elif key: return", "code stuff. Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a =", "except: return render_template('new.haml', flash=\"\"\" Error while creating syntax code stuff. Please retry.\"\"\") def", "hcode = a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer", "flash=\"\"\" Error while creating syntax code stuff. Please retry.\"\"\") def __show(self, key): keylist=key.split('.')", "return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element. 
Redirect back!\" return render_template('new.haml', flash=flash)", "key=None): try: flash=None if key == 'new': return render_template('new.haml') elif key: return self.__show(key)", "stuff. Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0])", "keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except:", "def get(self, key=None): try: flash=None if key == 'new': return render_template('new.haml') elif key:", "def index(self): return render_template('index.haml') def get(self, key=None): try: flash=None if key == 'new':", "hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error", "key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False", "key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element. Redirect back!\" return render_template('new.haml',", "__show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1])", "Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def", "= a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't", "doesn't exist. 
Please enter only filename suffix like .rb or .py \"\"\" return", "= Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry,", "index(self): return render_template('index.haml') def get(self, key=None): try: flash=None if key == 'new': return", "UI(FlaskView): def index(self): return render_template('index.haml') def get(self, key=None): try: flash=None if key ==", "== 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error while creating syntax", "key == 'new': return render_template('new.haml') elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find", "the Lexxer doesn't exist. Please enter only filename suffix like .rb or .py", "flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide =", "= a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't exist. Please enter only filename", "render_template('new.haml') elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element. Redirect back!\"", "(True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error while creating", "a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't exist. Please enter only filename suffix", "return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'),", "import * class UI(FlaskView): def index(self): return render_template('index.haml') def get(self, key=None): try: flash=None", "exist. 
Please enter only filename suffix like .rb or .py \"\"\" return render_template('show.haml',", "= ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode =", "retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode", "return render_template('new.haml', flash=\"\"\" Error while creating syntax code stuff. Please retry.\"\"\") def __show(self,", "render_template('new.haml', flash=\"\"\" Error while creating syntax code stuff. Please retry.\"\"\") def __show(self, key):", "a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\"", "if key == 'new': return render_template('new.haml') elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't", "return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error while creating syntax code stuff.", "post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml',", "Lexxer doesn't exist. Please enter only filename suffix like .rb or .py \"\"\"", "Please enter only filename suffix like .rb or .py \"\"\" return render_template('show.haml', key=ckey[0],", "mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except:", "Error while creating syntax code stuff. 
Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey", "Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but", "ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode", "except CodeNotFound: flash=\"Couldn't find syntax element. Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def", "Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide))", "syntax element. Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code,", "* class UI(FlaskView): def index(self): return render_template('index.haml') def get(self, key=None): try: flash=None if", "def post(self): try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return", "try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the", "hide)) except: return render_template('new.haml', flash=\"\"\" Error while creating syntax code stuff. Please retry.\"\"\")", "while creating syntax code stuff. Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey =", "self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax element. Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw')", "element. 
Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\")", "try: hide = (True,False)[bool(request.form.get('hide') == 'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\"", "== 'new': return render_template('new.haml') elif key: return self.__show(key) except CodeNotFound: flash=\"Couldn't find syntax", "Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try:", "render_template('index.haml') def get(self, key=None): try: flash=None if key == 'new': return render_template('new.haml') elif", "but the Lexxer doesn't exist. Please enter only filename suffix like .rb or", "flash=False except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't exist. Please", "flash=None if key == 'new': return render_template('new.haml') elif key: return self.__show(key) except CodeNotFound:", "def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self): try: hide = (True,False)[bool(request.form.get('hide') ==", "CodeNotFound: flash=\"Couldn't find syntax element. Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self,", "back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return Response(Code.find(key).code, mimetype=\"text/plain\") def post(self):", "((key,'txt'),keylist)[bool(len(keylist)>1)] a = Code.find(ckey[0]) try: hcode = a.highlight('.'+ckey[1]) flash=False except: hcode = a.highlight('.txt')", "except: hcode = a.highlight('.txt') flash=\"\"\" Sorry, but the Lexxer doesn't exist. Please enter", "syntax code stuff. Please retry.\"\"\") def __show(self, key): keylist=key.split('.') ckey = ((key,'txt'),keylist)[bool(len(keylist)>1)] a", "find syntax element. 
Redirect back!\" return render_template('new.haml', flash=flash) @route('/<key>/raw') def raw(self, key): return", "'true')] return redirect('/'+Code.new(request.form.get('code'), hide)) except: return render_template('new.haml', flash=\"\"\" Error while creating syntax code", "return render_template('index.haml') def get(self, key=None): try: flash=None if key == 'new': return render_template('new.haml')" ]
[ "if missing_tests: for test in missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if correct", "defaule value for this program\" continue if output == OUTPUTS[test]: correct += 1", "'99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int':", "= subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}. program {} {}'.format(numb,", "\"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\",", "= re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if': \"1\", 'test_if_false':", "'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false':", "'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left':", "\"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\",", "'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem':", "ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13',", "try: OUTPUTS[test] except KeyError: print \"{}. 
Program {} results {}\".format(numb, decorate_yellow(test), output) print", "decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test in enumerate(tests, 1): try:", "\"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false':", "'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8:", "'10', 'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space',", "ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0',", "enumerate(tests, 1): try: if test in INPUTS: command = [\"./TestPascalis -s tests/{} <", "else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}. 
program", "excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if", "tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = {", "'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value)", "'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum", "'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true':", "'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy strych',", "'0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum", "\"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\",", "{}\".format(numb, decorate_yellow(test), output) print \"There is defaule value for this program\" continue if", "for this program\" continue if output == OUTPUTS[test]: correct += 1 print \"{}.", "'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko',", "'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 
'test_python_str_left_right':", "set(tests) if missing_tests: for test in missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if", "test in enumerate(tests, 1): try: if test in INPUTS: command = [\"./TestPascalis -s", "'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables':", "test, decorate_red(\"failed\")) print \"output = {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests)", "'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0':", "'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while':", "for numb, test in enumerate(tests, 1): try: if test in INPUTS: command =", "'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false':", "432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char':", "111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints':", "'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings',", "'{}. 
program {} {}'.format(numb, test, decorate_red('cannot be excecuted')) print exc continue try: OUTPUTS[test]", "23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c',", "ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1',", "\"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\",", "'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false':", "'10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110',", "{}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print", "output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}. 
program {}", "est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10',", "'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple':", "= len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests: for test in missing_tests:", "\"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed", "'3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum', 'test_function_with_variable_params': '10000 10000',", "1): try: if test in INPUTS: command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test,", "missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests: for test in missing_tests: print 'missing", "z orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma", "'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct':", "value for this program\" continue if output == OUTPUTS[test]: correct += 1 print", "'6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy strych', 'test_decl_char':", "'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char':", "'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 
'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', }", "\"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\",", "OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests: for test", "'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param':", "0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int':", "\"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\",", "'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings':", "'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param':", "'-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula", "\"{}. Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. 
Test {} is", "'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string':", "{}'.format(decorate_red(test)) else: if correct == test_count: print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests", "\"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\",", "< inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)])", "decorate_yellow(test), output) print \"There is defaule value for this program\" continue if output", "'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus", "is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct, correct)) else: print decorate_red(\"{}/{} tests passed.\".format(correct, test_count))", "decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test", "{} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. 
Test {} is {}.\".format(numb, test,", "\"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\",", "'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value)", "\"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\",", "print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct, correct)) else: print decorate_red(\"{}/{} tests", "'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800',", "'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int':", "'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut", "test in INPUTS: command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output =", "len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests: for test in missing_tests: print", "re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\",", "'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum',", "falsum', 'test_array_length': '10', 
'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9:", "\"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\",", "'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right':", "re tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS =", "def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct", "decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct =", "else: if correct == test_count: print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct,", "'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2':", "if correct == test_count: print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct, correct))", "second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut", "'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 
'test_and_false_true':", "'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false':", "'10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n',", "\" \", tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el':", "3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces':", "'0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy", "'109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum", "print \"output = {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests =", "'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut',", "'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out',", "'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal", "verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1:", "this program\" continue if output == OUTPUTS[test]: correct += 1 print \"{}. 
Test", "{}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests: for", "'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam", "'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true':", "'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def", "'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est anno 1972, <NAME>", "\"{}. Program {} results {}\".format(numb, decorate_yellow(test), output) print \"There is defaule value for", "'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita", "{ 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c',", "\", tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\",", "\"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\",", "'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus", "'12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z 
orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz", "print '{}. program {} {}'.format(numb, test, decorate_red('cannot be excecuted')) print exc continue try:", "= set(OUTPUTS.keys()) - set(tests) if missing_tests: for test in missing_tests: print 'missing test", "'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11',", "'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true':", "vita Pascalis etiam pellicula perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample':", "\"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\",", "'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou',", "'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa", "'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0':", "'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str':", "'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6':", "ma kota', 
'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2':", "\"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\",", "'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum',", "'10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5:", "'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false':", "'\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for", "'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl':", "'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false':", "kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800',", "{} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {}, excepcted = {}\".format(output, OUTPUTS[test])", "'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': 
'10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z", "test_count = len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests: for test in", "output == OUTPUTS[test]: correct += 1 print \"{}. Test {} is {}.\".format(numb, test,", "dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion':", "'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum',", "'123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb',", "'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input',", "'missing test {}'.format(decorate_red(test)) else: if correct == test_count: print decorate_green(\"Everything is ok.\") print", "'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus',", "'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string':", "'10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS = {", "print \"{}. Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. 
Test {}", "'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum',", "'24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum", "verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2:", "'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109',", "\"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\",", "shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}.", "\"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\",", "'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false':", "be excecuted')) print exc continue try: OUTPUTS[test] except KeyError: print \"{}. 
Program {}", "'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum',", "10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6:", "'\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test in enumerate(tests,", "'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99',", "import re tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS", "def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test in enumerate(tests, 1):", "'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS =", "12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80',", "OUTPUTS = { 'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\",", "test, decorate_green(\"ok\")) else: print \"{}. Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output", "etiam pellicula perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param':", "'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true':", "\"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}. 
program {} {}'.format(numb, test, decorate_red('cannot be", "-s tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\",", "\"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\",", "return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test in", "'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false':", "= 0 for numb, test in enumerate(tests, 1): try: if test in INPUTS:", "= subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = { 'program':", "'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum',", "'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true':", "except KeyError: print \"{}. Program {} results {}\".format(numb, decorate_yellow(test), output) print \"There is", "== OUTPUTS[test]: correct += 1 print \"{}. Test {} is {}.\".format(numb, test, decorate_green(\"ok\"))", "\"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\",", "subprocess.CalledProcessError as exc: print '{}. 
program {} {}'.format(numb, test, decorate_red('cannot be excecuted')) print", "'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char':", "= { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char':", "\"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings':", "command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else:", "'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false':", "decorate_red(\"failed\")) print \"output = {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests", "'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9',", "perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param':", "nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var':", "'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum', 'test_function_with_variable_params': '10000", "'test_string_length_0': '0', 'test_string_length_6': '6', 
'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma", "} def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value)", "'\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test in enumerate(tests, 1): try: if test", "'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params':", "'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true':", "= subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc:", "'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int':", "'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est anno", "excecuted')) print exc continue try: OUTPUTS[test] except KeyError: print \"{}. 
Program {} results", "'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings':", "verum verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array':", "Program {} results {}\".format(numb, decorate_yellow(test), output) print \"There is defaule value for this", "verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3:", "moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum',", "\"There is defaule value for this program\" continue if output == OUTPUTS[test]: correct", "print \"{}. Program {} results {}\".format(numb, decorate_yellow(test), output) print \"There is defaule value", "orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota',", "'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false':", "Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {}, excepcted = {}\".format(output,", "} INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints':", "'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true':", "'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': 
'3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange':", "1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910',", "52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12',", "in INPUTS: command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command,", "print \"{}. Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {}, excepcted", "\"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\",", "\"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second',", "'3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return", "'9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable':", "anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict':", "== test_count: print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct, correct)) else: print", "'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7:", "'test_procedure_with_two_params': 
'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive':", "test in missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if correct == test_count: print", "{}, excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests)", "\"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\",", "\"{}. Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {}, excepcted =", "= {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys()) - set(tests) if missing_tests:", "string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out':", "else: print \"{}. 
Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {},", "'1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est", "'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true':", "set(OUTPUTS.keys()) - set(tests) if missing_tests: for test in missing_tests: print 'missing test {}'.format(decorate_red(test))", "strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't',", "in enumerate(tests, 1): try: if test in INPUTS: command = [\"./TestPascalis -s tests/{}", "2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop':", "\"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\",", "'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def", "decorate_green(\"ok\")) else: print \"{}. 
Test {} is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output =", "'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2':", "'1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie", "'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut", "KeyError: print \"{}. Program {} results {}\".format(numb, decorate_yellow(test), output) print \"There is defaule", "1 print \"{}. Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. Test", "'first second', 'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1':", "\"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\",", "'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false':", "'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int':", "OUTPUTS[test]: correct += 1 print \"{}. 
Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else:", "subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = { 'program': \"1\",", "is {}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count", "'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est anno 1972, <NAME> moderatore.',", "results {}\".format(numb, decorate_yellow(test), output) print \"There is defaule value for this program\" continue", "2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10',", "missing_tests: for test in missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if correct ==", "'1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0',", "123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251',", "'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true':", "'t', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112',", "'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false':", "'test_lege_3_strings': '3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value):", 
"tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\",", "'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum", "print 'missing test {}'.format(decorate_red(test)) else: if correct == test_count: print decorate_green(\"Everything is ok.\")", "'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum',", "'1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123',", "'11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n',", "+= 1 print \"{}. Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}.", "'9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo", "'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value):", "'80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis", "continue if output == OUTPUTS[test]: correct += 1 print \"{}. Test {} is", "continue try: OUTPUTS[test] except KeyError: print \"{}. 
Program {} results {}\".format(numb, decorate_yellow(test), output)", "'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first", "'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true':", "tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false':", "'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write':", "'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j',", "'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def", "tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if': \"1\",", "\"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\",", "'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\",", "numb, test in enumerate(tests, 1): try: if test in INPUTS: command = [\"./TestPascalis", "{}.\".format(numb, test, decorate_red(\"failed\")) print \"output = {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count =", "'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 
'test_x_minus_1':", "subprocess import re tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split()", "placki', 'test_global_variable': 'teraz ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param':", "'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6',", "'-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De", "'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula perfecta", "\"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}. program {} {}'.format(numb, test, decorate_red('cannot", "'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value):", "correct = 0 for numb, test in enumerate(tests, 1): try: if test in", "'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1':", "'10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10',", "'3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', } def decorate_green(value): return", "'test_decl_str': 'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka',", "{ 
'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\",", "'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum',", "21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123', 'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2':", "return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb, test in enumerate(tests, 1): try: if", "'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false':", "except subprocess.CalledProcessError as exc: print '{}. program {} {}'.format(numb, test, decorate_red('cannot be excecuted'))", "<NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function':", "'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1',", "\"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\", 'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\",", "\"tests\"]) tests = re.sub(\"[^\\w]\", \" \", tests_raw).split() OUTPUTS = { 'program': \"1\", 'test_if':", "\"1\", 'test_if_el_false': \"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\",", "'test_neq_bool_true_false': \"1\", 'test_neq_bool_false_false': \"0\", 'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 
'test_and_false_true': \"0\", 'test_and_false_false':", "'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4:", "'De vita Pascalis etiam pellicula perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia',", "'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum',", "test_count: print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct, correct)) else: print decorate_red(\"{}/{}", "Pascalis etiam pellicula perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10',", "'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c',", "'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string': 'jajka', 'test_array_construct': '13', 'test_sub_str_var': 't', 'test_block_variables':", "'3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum', 'test_function_with_variable_params':", "exc continue try: OUTPUTS[test] except KeyError: print \"{}. 
Program {} results {}\".format(numb, decorate_yellow(test),", "trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800',", "'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1':", "out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord':", "if output == OUTPUTS[test]: correct += 1 print \"{}. Test {} is {}.\".format(numb,", "= { 'program': \"1\", 'test_if': \"1\", 'test_if_false': \"\", 'test_if_el': \"1\", 'test_if_el_false': \"0\", 'test_eq_int':", "'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz ty!raz dwa trzy',", "'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure':", "\"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\",", "'verum', 'test_decl_str': 'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content': 'mleko', 'test_change_one_letter_string':", "INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints',", "'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple':", "'1', 'test_decl_bool': 'verum', 'test_decl_str': 
'michal ma nowy strych', 'test_decl_char': 'x', 'test_eq_var_int': 'falsum', 'test_change_string_content':", "'test_array_length': '10', 'test_function_return_array': '1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10:", "test, decorate_red('cannot be excecuted')) print exc continue try: OUTPUTS[test] except KeyError: print \"{}.", "'1: 0\\n2: 2\\n3: 3\\n4: 12\\n5: 21\\n6: 52\\n7: 111\\n8: 123\\n9: 432\\n10: 23423\\n', 'test_lege_int_simple': '123',", "output = subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as", "\"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string':", "'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n',", "as exc: print '{}. 
program {} {}'.format(numb, test, decorate_red('cannot be excecuted')) print exc", "1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10',", "pellicula perfecta est anno 1972, <NAME> moderatore.', 'test_lege_3_strings': 'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1',", "'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6': '6', 'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool':", "out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut", "def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0 for numb,", "import subprocess import re tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \" \",", "'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars',", "print exc continue try: OUTPUTS[test] except KeyError: print \"{}. Program {} results {}\".format(numb,", "'3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls': 'falsumverum verum verum", "INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError", "program\" continue if output == OUTPUTS[test]: correct += 1 print \"{}. 
Test {}", "Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. Test {} is {}.\".format(numb,", "\"0\", 'test_eq_int': \"1\", 'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\",", "\"output = {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys())", "'test_lege_minus_int': '-10', 'test_lege_int_white_spaces': '80', 'test_lege_int_white_spaces2': '-12', 'test_lege_3_ints': '1251', 'test_lege_char': 'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string':", "decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{} tests passed.\".format(correct, correct)) else: print decorate_red(\"{}/{} tests passed.\".format(correct,", "'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\", 'test_lt_true': \"1\", 'test_lt_false': \"0\", 'test_gt_true':", "'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9', 'test_while': '1112', 'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not':", "'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true':", "test {}'.format(decorate_red(test)) else: if correct == test_count: print decorate_green(\"Everything is ok.\") print decorate_green(\"{}/{}", "in missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if correct == test_count: print decorate_green(\"Everything", "'test_10_minus_1': '9', 'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki',", "print \"There is defaule value for this program\" continue if output == OUTPUTS[test]:", "'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz 
ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala", "correct += 1 print \"{}. Test {} is {}.\".format(numb, test, decorate_green(\"ok\")) else: print", "INPUTS: command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True)", "\"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char': 'c', 'test_print_true':", "'test_neq_bool_false_true': \"1\", 'test_and_true_true': \"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false':", "missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if correct == test_count: print decorate_green(\"Everything is", "exc: print '{}. program {} {}'.format(numb, test, decorate_red('cannot be excecuted')) print exc continue", "\"1\", 'test_ge_false': \"0\", 'test_eq_char_false': \"0\", 'test_eq_char_true': \"1\", 'test_eq_string_true': \"1\", 'test_print_string': \"printed string\", 'test_print_char':", "'test_char_ord': '99', 'test_decl_int': '1', 'test_decl_bool': 'verum', 'test_decl_str': 'michal ma nowy strych', 'test_decl_char': 'x',", "python import subprocess import re tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\", \"", "inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except", "0 for numb, test in enumerate(tests, 1): try: if test in INPUTS: command", "'test_lege_int_white_spaces2': 'white_spaces2', 'test_lege_3_ints': '3_ints', 'test_lege_char': 'c', 'test_lege_10_char': '10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': '3_strings', }", "\"1\", 'test_and_true_false': \"0\", 'test_and_false_true': \"0\", 
'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\",", "\"1\", 'test_lt_false': \"0\", 'test_gt_true': \"1\", 'test_gt_false': \"0\", 'test_le_true': \"1\", 'test_le_false': \"0\", 'test_ge_true': \"1\",", "'falsumverum verum verum verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length':", "program {} {}'.format(numb, test, decorate_red('cannot be excecuted')) print exc continue try: OUTPUTS[test] except", "is {}.\".format(numb, test, decorate_green(\"ok\")) else: print \"{}. Test {} is {}.\".format(numb, test, decorate_red(\"failed\"))", "= [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output", "[\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output = subprocess.check_output(command, shell=True) else: output =", "{}'.format(numb, test, decorate_red('cannot be excecuted')) print exc continue try: OUTPUTS[test] except KeyError: print", "output) print \"There is defaule value for this program\" continue if output ==", "ty!raz dwa trzy', 'test_procedure_with_param': '1110', 'test_procedure_with_two_params': 'ala ma kota', 'test_procedure_with_variable_param': 'verum', 'test_procedure_with_many_variables': '24',", "{} results {}\".format(numb, decorate_yellow(test), output) print \"There is defaule value for this program\"", "'kochammojestudia', 'test_dict_sample': '10', 'test_dict_param': '1', 'test_dict_variable_param': '10', 'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS", "'test_for_loop': '12345678910', 'test_bool_not': 'falsum', 'test_array_write': '10\\n20\\n30\\n40\\n50\\n60\\n70\\n80\\n90\\n100\\n', 'test_procedure': 'Maslo z orzechamiLubie placki', 'test_global_variable': 'teraz", "'10_chars', 'test_lege_string': 'string_input', 'test_lege_3_strings': 
'3_strings', } def decorate_green(value): return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return", "try: if test in INPUTS: command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])]", "#!/usr/bin/env python import subprocess import re tests_raw = subprocess.check_output([\"ls\", \"tests\"]) tests = re.sub(\"[^\\w]\",", "'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces': 'int_with_white_space', 'test_lege_int_white_spaces2': 'white_spaces2',", "verum verum verum verum verum', 'test_function_with_variable_params': '10000 10000', 'test_array_function_param': 'verum falsum', 'test_array_length': '10',", "return '\\033[1;32m{}\\033[1;m'.format(value) def decorate_red(value): return '\\033[1;31m{}\\033[1;m'.format(value) def decorate_yellow(value): return '\\033[1;33m{}\\033[1;m'.format(value) correct = 0", "for test in missing_tests: print 'missing test {}'.format(decorate_red(test)) else: if correct == test_count:", "'13', 'test_sub_str_var': 't', 'test_block_variables': 'titulus 2titulus 1', 'test_stmt_decl': 'titulus', 'test_x_plus_1': '11', 'test_x_minus_1': '9',", "'c', 'test_lege_10_char': 'abc\\n10\\nyhb', 'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est anno 1972,", "\"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true': \"0\", 'test_neq_bool_true_true': \"0\",", "'c', 'test_print_true': 'verum', 'test_print_false': 'falsum', 'test_le_string': 'falsum', 'test_add_strings': 'first second', 'test_python_str_cut_out': 'cut out',", "decorate_red('cannot be excecuted')) print exc continue try: OUTPUTS[test] except KeyError: print \"{}. Program", "- set(tests) if missing_tests: for test in missing_tests: print 'missing test {}'.format(decorate_red(test)) else:", "OUTPUTS[test] except KeyError: print \"{}. 
Program {} results {}\".format(numb, decorate_yellow(test), output) print \"There", "'test_function_return_dict': '10', 'test_function_declaration_in_function': 'verum', } INPUTS = { 'test_lege_int_simple': 'cat_123', 'test_lege_minus_int': 'cat_minus_10', 'test_lege_int_white_spaces':", "subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print '{}. program {} {}'.format(numb, test,", "is defaule value for this program\" continue if output == OUTPUTS[test]: correct +=", "\"0\", 'test_and_false_true': \"0\", 'test_and_false_false': \"0\", 'test_or_true_true': \"1\", 'test_or_true_false': \"1\", 'test_or_false_true': \"1\", 'test_or_false_false': \"0\",", "= {}, excepcted = {}\".format(output, OUTPUTS[test]) test_count = len(tests) missing_tests = set(OUTPUTS.keys()) -", "subprocess.check_output(command, shell=True) else: output = subprocess.check_output([\"./TestPascalis\", \"-s\", \"tests/{}\".format(test)]) except subprocess.CalledProcessError as exc: print", "'test_python_str_cut_out': 'cut out', 'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out',", "'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou', 'test_string_elem': 'j', 'test_string_elem_2': 'n', 'test_string_length_0': '0', 'test_string_length_6':", "{} {}'.format(numb, test, decorate_red('cannot be excecuted')) print exc continue try: OUTPUTS[test] except KeyError:", "'test_procedure_with_many_variables': '24', 'test_procedure_recursion': '3628800', 'test_function_return_0': '0', 'test_function_recursive': '3628800', 'test_function_recursive2': '3628800', 'test_variables_strange': '109', 'test_function_recursive_with_decls':", "if test in INPUTS: command = [\"./TestPascalis -s tests/{} < inputs/{}\".format(test, INPUTS[test])] output", "correct == test_count: print decorate_green(\"Everything is ok.\") print 
decorate_green(\"{}/{} tests passed.\".format(correct, correct)) else:", "'test_eq_int_false': \"0\", 'test_neq_int': \"0\", 'test_neq_int_false': \"1\", 'test_eq_bool_true_true': \"1\", 'test_eq_bool_true_false': \"0\", 'test_eq_bool_false_false': \"1\", 'test_eq_bool_false_true':", "'test_python_str_cut_out_left': 'out', 'test_python_str_cut_out_right': 'cut', 'test_python_str_left_right': 'ut ou', 'test_python_str_left_1': 'ut out', 'test_python_str_right_6': 'cut ou'," ]
[ "= x self.left = None self.right = None class Solution: tilt = 0", "= TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right =", "not root: return 0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum", "root.right.left = TreeNode(13) root.right.right = TreeNode(17) root.right.right.right = TreeNode(19) ob = Solution() print(ob.findTilt(root))", "self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if not root: return 0 leftSum =", "findTiltHelper(self, root): if not root: return 0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right)", "int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if not root: return 0 leftSum", "+= abs(leftSum - rightSum) return leftSum + rightSum + root.val root = TreeNode(10)", "root = TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left", "return leftSum + rightSum + root.val root = TreeNode(10) root.left = TreeNode(3) root.left.left", "self.tilt += abs(leftSum - rightSum) return leftSum + rightSum + root.val root =", "leftSum + rightSum + root.val root = TreeNode(10) root.left = TreeNode(3) root.left.left =", "= TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left =", "TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17) root.right.right.right = TreeNode(19) ob = Solution()", "rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum) return leftSum + rightSum +", "self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum) return leftSum + rightSum", "root: return 0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum -", "self.right = None 
class Solution: tilt = 0 def findTilt(self, root: TreeNode) ->", "0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum) return", "tilt = 0 def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root) return self.tilt def", "class TreeNode: def __init__(self, x): self.val = x self.left = None self.right =", "+ root.val root = TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right =", "= self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum) return leftSum +", "x): self.val = x self.left = None self.right = None class Solution: tilt", "None class Solution: tilt = 0 def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root)", "TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15)", "self.tilt def findTiltHelper(self, root): if not root: return 0 leftSum = self.findTiltHelper(root.left) rightSum", "= None class Solution: tilt = 0 def findTilt(self, root: TreeNode) -> int:", "class Solution: tilt = 0 def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root) return", "if not root: return 0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt +=", "TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None", "root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right", "root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right", "rightSum + root.val root = TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right", "__init__(self, x): self.val = x self.left = None self.right = None class Solution:", "self.findTiltHelper(root.right) self.tilt += abs(leftSum - 
rightSum) return leftSum + rightSum + root.val root", "= TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17) root.right.right.right = TreeNode(19) ob =", "= None self.right = None class Solution: tilt = 0 def findTilt(self, root:", "= TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right =", "return self.tilt def findTiltHelper(self, root): if not root: return 0 leftSum = self.findTiltHelper(root.left)", "rightSum) return leftSum + rightSum + root.val root = TreeNode(10) root.left = TreeNode(3)", "= 0 def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self,", "TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7)", "= TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right =", "TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13)", "TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17) root.right.right.right = TreeNode(19)", "TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9)", "abs(leftSum - rightSum) return leftSum + rightSum + root.val root = TreeNode(10) root.left", "self.val = x self.left = None self.right = None class Solution: tilt =", "root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left", "root.val root = TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8)", "root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17) root.right.right.right = TreeNode(19) ob", "findTilt(self, root: 
TreeNode) -> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if not", "TreeNode) -> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if not root: return", "def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if", "root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left = TreeNode(7) root.left.right.right", "= TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17) root.right.right.right =", "root): if not root: return 0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt", "TreeNode(7) root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17)", "= TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2) root.left.right = TreeNode(8) root.left.right.left =", "self.left = None self.right = None class Solution: tilt = 0 def findTilt(self,", "+ rightSum + root.val root = TreeNode(10) root.left = TreeNode(3) root.left.left = TreeNode(2)", "0 def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root):", "def findTiltHelper(self, root): if not root: return 0 leftSum = self.findTiltHelper(root.left) rightSum =", "-> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if not root: return 0", "x self.left = None self.right = None class Solution: tilt = 0 def", "None self.right = None class Solution: tilt = 0 def findTilt(self, root: TreeNode)", "def __init__(self, x): self.val = x self.left = None self.right = None class", "= self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum) return leftSum + rightSum + root.val", "root.left.right.right = TreeNode(9) root.right = TreeNode(15) root.right.left = TreeNode(13) root.right.right = TreeNode(17) 
root.right.right.right", "leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum) return leftSum", "- rightSum) return leftSum + rightSum + root.val root = TreeNode(10) root.left =", "Solution: tilt = 0 def findTilt(self, root: TreeNode) -> int: self.findTiltHelper(root) return self.tilt", "root: TreeNode) -> int: self.findTiltHelper(root) return self.tilt def findTiltHelper(self, root): if not root:", "return 0 leftSum = self.findTiltHelper(root.left) rightSum = self.findTiltHelper(root.right) self.tilt += abs(leftSum - rightSum)" ]
[ "= resource.Table('konomania-tweet') # Load the JSON object created in the step 3 using", "\"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to", "pandas as pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df =", "if i > 1: break tweet[\"id\"] = i table.put_item(Item=tweet) # Test # response", "table = resource.Table('konomania-tweet') # Load the JSON object created in the step 3", "boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding =", "DynamoDB table table = resource.Table('konomania-tweet') # Load the JSON object created in the", "created in the step 3 using put_item method for i, tweet in enumerate(df_json):", "json import codecs import pandas as pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path", "pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb',", "resource.Table('konomania-tweet') # Load the JSON object created in the step 3 using put_item", "df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to the DynamoDB table", "step 3 using put_item method for i, tweet in enumerate(df_json): if i >", "boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to the DynamoDB table table = resource.Table('konomania-tweet') # Load", "\"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"]", "3 using put_item method for i, tweet in enumerate(df_json): if i > 1:", "encoding = \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1') #", "= boto3.resource('dynamodb', region_name='ap-northeast-1') # 
Connect to the DynamoDB table table = resource.Table('konomania-tweet') #", "the step 3 using put_item method for i, tweet in enumerate(df_json): if i", "i > 1: break tweet[\"id\"] = i table.put_item(Item=tweet) # Test # response =", "i, tweet in enumerate(df_json): if i > 1: break tweet[\"id\"] = i table.put_item(Item=tweet)", "header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1')", "df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to the DynamoDB table table =", "import codecs import pandas as pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path =", "= \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json =", "pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None,", "\"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records')", "enumerate(df_json): if i > 1: break tweet[\"id\"] = i table.put_item(Item=tweet) # Test #", "> 1: break tweet[\"id\"] = i table.put_item(Item=tweet) # Test # response = table.get_item(Key={'seat_no':", "tweet[\"id\"] = i table.put_item(Item=tweet) # Test # response = table.get_item(Key={'seat_no': 'A 314216'}) #", "# Connect to the DynamoDB table table = resource.Table('konomania-tweet') # Load the JSON", "import boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding", "save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json", "import pandas as pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df", "df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') 
resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to the", "=[\"tweet\"] df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to the DynamoDB", "for i, tweet in enumerate(df_json): if i > 1: break tweet[\"id\"] = i", "table table = resource.Table('konomania-tweet') # Load the JSON object created in the step", "method for i, tweet in enumerate(df_json): if i > 1: break tweet[\"id\"] =", "Connect to the DynamoDB table table = resource.Table('konomania-tweet') # Load the JSON object", "in enumerate(df_json): if i > 1: break tweet[\"id\"] = i table.put_item(Item=tweet) # Test", "JSON object created in the step 3 using put_item method for i, tweet", "Load the JSON object created in the step 3 using put_item method for", "in the step 3 using put_item method for i, tweet in enumerate(df_json): if", "df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource", "csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\")", "region_name='ap-northeast-1') # Connect to the DynamoDB table table = resource.Table('konomania-tweet') # Load the", "= \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns", "1: break tweet[\"id\"] = i table.put_item(Item=tweet) # Test # response = table.get_item(Key={'seat_no': 'A", "# Load the JSON object created in the step 3 using put_item method", "the DynamoDB table table = resource.Table('konomania-tweet') # Load the JSON object created in", "= \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect", "object created in the step 3 using put_item method for i, tweet in", "= df.to_dict(orient='records') resource = boto3.resource('dynamodb', 
region_name='ap-northeast-1') # Connect to the DynamoDB table table", "import json import codecs import pandas as pd import boto3 csv_path = \"./fixed_tweets.csv\"", "put_item method for i, tweet in enumerate(df_json): if i > 1: break tweet[\"id\"]", "break tweet[\"id\"] = i table.put_item(Item=tweet) # Test # response = table.get_item(Key={'seat_no': 'A 314216'})", "as pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\" df = pd.read_csv(csv_path,", "to the DynamoDB table table = resource.Table('konomania-tweet') # Load the JSON object created", "codecs import pandas as pd import boto3 csv_path = \"./fixed_tweets.csv\" save_path = \"./fixed_tweets.json\"", "tweet in enumerate(df_json): if i > 1: break tweet[\"id\"] = i table.put_item(Item=tweet) #", "= pd.read_csv(csv_path, header=None, encoding = \"utf-8\") df.columns =[\"tweet\"] df_json = df.to_dict(orient='records') resource =", "the JSON object created in the step 3 using put_item method for i,", "using put_item method for i, tweet in enumerate(df_json): if i > 1: break", "= i table.put_item(Item=tweet) # Test # response = table.get_item(Key={'seat_no': 'A 314216'}) # response", "resource = boto3.resource('dynamodb', region_name='ap-northeast-1') # Connect to the DynamoDB table table = resource.Table('konomania-tweet')" ]
[ "len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert)", "graph = {} # for l in range(ord('Z') - ord('A') + 1): #", "5 for e in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class", "= Node(source) if target not in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source,", "ord('A')) for x in map(lambda x: x.split(), open('input.in').readlines())] workers = 5 for e", "in range(6)] while(len(output) < graph_len): # print(len(output)) # print(len(graph)) for w in range(len(workers)):", "nodes_to_insert = [] graph_len = len(graph) time_point = 0 workers = [ -1", "→ {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node)", "graph[n].end_time and graph[n].busy >= 0 and w == 0: for k in graph[n].outputs:", "print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0:", "time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point + n", ": {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) ==", "def insert_target(self, target_id, target): self.outputs[target_id] = target def __repr__(self): return str({ 'in': self.inputs.keys(),", "l in range(ord('Z') - ord('A') + 1): # graph[l] = Node(l) for source,", "output = [] nodes_to_insert = [] graph_len = len(graph) time_point = 0 workers", "L can begin. 
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x", "to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time", "graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy]", "graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time:", "return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph = {} # for", "!= 0 and workers[w] == -1 and graph[n].busy == -1: print('Assigning {} to", "= 0 self.end_time = -1 self.busy = -1 def insert_source(self, source_id, source): self.inputs[source_id]", "{} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point", "time_point == graph[n].end_time and graph[n].busy >= 0 and w == 0: for k", "+ 1 + 60 workers[w] = n graph[n].busy = w if time_point ==", "== 0: for k in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] print(\"Removing {}", "def __init__(self, no): self.id = no self.inputs = {} self.outputs = {} self.begin_time", "nodes_to_insert[:limit] for n in processed_nodes: if n in graph: if w != 0", "= Node(l) for source, target in edges: if source not in graph: graph[source]", "= {} self.outputs = {} self.begin_time = 0 self.end_time = -1 self.busy =", "source not in graph: graph[source] = Node(source) if target not in graph: graph[target]", "range(len(workers)): nodes_to_insert = [] for node in graph: # print('{} : {} →", "limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in processed_nodes: if n", "{}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point + n + 1", "finished before step L can begin. 
edges = [(ord(x[1]) - ord('A'), ord(x[7]) -", "print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self, no): self.id", "= 5 for e in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1])))", "def insert_source(self, source_id, source): self.inputs[source_id] = source def insert_target(self, target_id, target): self.outputs[target_id] =", "Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert = [] graph_len =", "{} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0:", "time_point = 0 workers = [ -1 for i in range(6)] while(len(output) <", "for e in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object):", "x in map(lambda x: x.split(), open('input.in').readlines())] workers = 5 for e in edges:", "point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point + n +", "target): self.outputs[target_id] = target def __repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id':", "def __repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph = {}", "be finished before step L can begin. 
edges = [(ord(x[1]) - ord('A'), ord(x[7])", "graph[l] = Node(l) for source, target in edges: if source not in graph:", "= -1 graph[n].busy = -1 del graph[n] time_point += 1 print('Total time: {}", "- ord('A')) for x in map(lambda x: x.split(), open('input.in').readlines())] workers = 5 for", "== 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point)) break", "for w in range(len(workers)): nodes_to_insert = [] for node in graph: # print('{}", "graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy", "len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert))", "= min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in processed_nodes: if n in", "- ord('A'), ord(x[7]) - ord('A')) for x in map(lambda x: x.split(), open('input.in').readlines())] workers", "insert_target(self, target_id, target): self.outputs[target_id] = target def __repr__(self): return str({ 'in': self.inputs.keys(), 'out':", "processed_nodes: if n in graph: if w != 0 and workers[w] == -1", "{}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1 del graph[n] time_point +=", "'id': [self.id]}) graph = {} # for l in range(ord('Z') - ord('A') +", "not in graph: graph[source] = Node(source) if target not in graph: graph[target] =", "= target def __repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph", "if target not in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output", "{}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total", 
"__repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph = {} #", "edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self, no):", "if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time: {}", "in map(lambda x: x.split(), open('input.in').readlines())] workers = 5 for e in edges: print('{}", "= w if time_point == graph[n].end_time and graph[n].busy >= 0 and w ==", "if len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers),", "self.end_time = -1 self.busy = -1 def insert_source(self, source_id, source): self.inputs[source_id] = source", "{} self.outputs = {} self.begin_time = 0 self.end_time = -1 self.busy = -1", "0 workers = [ -1 for i in range(6)] while(len(output) < graph_len): #", "→ {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self, no): self.id =", "# for l in range(ord('Z') - ord('A') + 1): # graph[l] = Node(l)", "TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1 del graph[n] time_point", "graph[source] = Node(source) if target not in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target])", "# Example line: Step A must be finished before step L can begin.", "in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n)", "= 0 workers = [ -1 for i in range(6)] while(len(output) < graph_len):", "in range(len(workers)): nodes_to_insert = [] for node in graph: # print('{} : {}", "== -1: print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point))", "1): # graph[l] = Node(l) for source, target in edges: if source not", "k in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] 
print(\"Removing {} TP {}.\".format(n, time_point))", "line: Step A must be finished before step L can begin. edges =", "<reponame>zagura/aoc-2017 #!/usr/bin/python3 # Example line: Step A must be finished before step L", "-1 and graph[n].busy == -1: print('Assigning {} to worker {} at time point:", "no): self.id = no self.inputs = {} self.outputs = {} self.begin_time = 0", "and graph[n].busy >= 0 and w == 0: for k in graph[n].outputs: out", "workers = 5 for e in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') +", "for i in range(6)] while(len(output) < graph_len): # print(len(output)) # print(len(graph)) for w", "graph_len = len(graph) time_point = 0 workers = [ -1 for i in", "if w != 0 and workers[w] == -1 and graph[n].busy == -1: print('Assigning", "# print(len(graph)) for w in range(len(workers)): nodes_to_insert = [] for node in graph:", "w if time_point == graph[n].end_time and graph[n].busy >= 0 and w == 0:", "source_id, source): self.inputs[source_id] = source def insert_target(self, target_id, target): self.outputs[target_id] = target def", "in processed_nodes: if n in graph: if w != 0 and workers[w] ==", "{} self.begin_time = 0 self.end_time = -1 self.busy = -1 def insert_source(self, source_id,", "n in graph: if w != 0 and workers[w] == -1 and graph[n].busy", "== graph[n].end_time and graph[n].busy >= 0 and w == 0: for k in", "# print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if", "[] nodes_to_insert = [] graph_len = len(graph) time_point = 0 workers = [", "class Node(object): def __init__(self, no): self.id = no self.inputs = {} self.outputs =", "1 + 60 workers[w] = n graph[n].busy = w if time_point == graph[n].end_time", "- ord('A') + 1): # graph[l] = Node(l) for source, target in edges:", "= time_point + n + 1 + 60 workers[w] = n graph[n].busy =", "for n in processed_nodes: if n in graph: if w != 0 and", "print('{} : {} → {}'.format(node, 
len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs)", "graph: graph[source] = Node(source) if target not in graph: graph[target] = Node(target) graph[source].insert_target(target,", "target in edges: if source not in graph: graph[source] = Node(source) if target", "Example line: Step A must be finished before step L can begin. edges", "graph[n].busy == -1: print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w,", "= graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1", "= time_point graph[n].end_time = time_point + n + 1 + 60 workers[w] =", "graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert = [] graph_len = len(graph) time_point", "in graph: graph[source] = Node(source) if target not in graph: graph[target] = Node(target)", "workers[w] == -1 and graph[n].busy == -1: print('Assigning {} to worker {} at", "map(lambda x: x.split(), open('input.in').readlines())] workers = 5 for e in edges: print('{} →", "can begin. 
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in", "-1 self.busy = -1 def insert_source(self, source_id, source): self.inputs[source_id] = source def insert_target(self,", "w != 0 and workers[w] == -1 and graph[n].busy == -1: print('Assigning {}", "= [] nodes_to_insert = [] graph_len = len(graph) time_point = 0 workers =", "i in range(6)] while(len(output) < graph_len): # print(len(output)) # print(len(graph)) for w in", "'out': self.outputs.keys(), 'id': [self.id]}) graph = {} # for l in range(ord('Z') -", "{} # for l in range(ord('Z') - ord('A') + 1): # graph[l] =", "self.busy = -1 def insert_source(self, source_id, source): self.inputs[source_id] = source def insert_target(self, target_id,", "for l in range(ord('Z') - ord('A') + 1): # graph[l] = Node(l) for", "edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in map(lambda x:", "graph[n].busy = w if time_point == graph[n].end_time and graph[n].busy >= 0 and w", ".'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in", "= source def insert_target(self, target_id, target): self.outputs[target_id] = target def __repr__(self): return str({", "= n graph[n].busy = w if time_point == graph[n].end_time and graph[n].busy >= 0", "for x in map(lambda x: x.split(), open('input.in').readlines())] workers = 5 for e in", "+ e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self, no): self.id = no self.inputs", "0 self.end_time = -1 self.busy = -1 def insert_source(self, source_id, source): self.inputs[source_id] =", "{} at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point", "time_point graph[n].end_time = time_point + n + 1 + 60 workers[w] = n", "w == 0: for k in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] print(\"Removing", "[ -1 for i in range(6)] while(len(output) < graph_len): # print(len(output)) # 
print(len(graph))", "break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in processed_nodes:", "e in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object): def", "graph[n].end_time = time_point + n + 1 + 60 workers[w] = n graph[n].busy", "+ n + 1 + 60 workers[w] = n graph[n].busy = w if", "self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph = {} # for l in range(ord('Z')", "time_point + n + 1 + 60 workers[w] = n graph[n].busy = w", "x.split(), open('input.in').readlines())] workers = 5 for e in edges: print('{} → {}'.format(chr(ord('A') +", "+ e[1]))) class Node(object): def __init__(self, no): self.id = no self.inputs = {}", "= len(graph) time_point = 0 workers = [ -1 for i in range(6)]", "{} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n", "0 and w == 0: for k in graph[n].outputs: out = graph[n].outputs[k] del", "ord('A'), ord(x[7]) - ord('A')) for x in map(lambda x: x.split(), open('input.in').readlines())] workers =", "{}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert)", ">= 0 and w == 0: for k in graph[n].outputs: out = graph[n].outputs[k]", "source, target in edges: if source not in graph: graph[source] = Node(source) if", "node in graph: # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}:", "at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point +", "-1 graph[n].busy = -1 del graph[n] time_point += 1 print('Total time: {} .'.format(time_point))", "del out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 
graph[n].busy =", "# print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) ==", "for node in graph: # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) #", "{}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self, no): self.id = no", "in graph: # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node,", "= [ -1 for i in range(6)] while(len(output) < graph_len): # print(len(output)) #", "and graph[n].busy == -1: print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')),", "e[1]))) class Node(object): def __init__(self, no): self.id = no self.inputs = {} self.outputs", "graph: if w != 0 and workers[w] == -1 and graph[n].busy == -1:", "time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point + n + 1 + 60", "60 workers[w] = n graph[n].busy = w if time_point == graph[n].end_time and graph[n].busy", "# graph[l] = Node(l) for source, target in edges: if source not in", "graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert = [] graph_len = len(graph) time_point =", "Step A must be finished before step L can begin. 
edges = [(ord(x[1])", "Node(object): def __init__(self, no): self.id = no self.inputs = {} self.outputs = {}", "workers = [ -1 for i in range(6)] while(len(output) < graph_len): # print(len(output))", "if source not in graph: graph[source] = Node(source) if target not in graph:", "min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in processed_nodes: if n in graph:", "n in processed_nodes: if n in graph: if w != 0 and workers[w]", "w, time_point)) graph[n].begin_time = time_point graph[n].end_time = time_point + n + 1 +", "for k in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP {}.\".format(n,", "< graph_len): # print(len(output)) # print(len(graph)) for w in range(len(workers)): nodes_to_insert = []", "in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self,", "target not in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output =", "len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point))", "#print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit =", "= nodes_to_insert[:limit] for n in processed_nodes: if n in graph: if w !=", "[] for node in graph: # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs)))", "= [] graph_len = len(graph) time_point = 0 workers = [ -1 for", "nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in processed_nodes: if", "len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for n in processed_nodes: if n in graph: if", "+ 60 workers[w] = n graph[n].busy = w if time_point == graph[n].end_time and", "= no self.inputs = {} self.outputs = 
{} self.begin_time = 0 self.end_time =", "workers[w] = n graph[n].busy = w if time_point == graph[n].end_time and graph[n].busy >=", "time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1 del graph[n] time_point += 1", "graph[source]) output = [] nodes_to_insert = [] graph_len = len(graph) time_point = 0", "== -1 and graph[n].busy == -1: print('Assigning {} to worker {} at time", "workers[graph[n].busy] = -1 graph[n].busy = -1 del graph[n] time_point += 1 print('Total time:", "n + 1 + 60 workers[w] = n graph[n].busy = w if time_point", "__init__(self, no): self.id = no self.inputs = {} self.outputs = {} self.begin_time =", "not in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = []", "self.inputs = {} self.outputs = {} self.begin_time = 0 self.end_time = -1 self.busy", "0: nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort()", "n graph[n].busy = w if time_point == graph[n].end_time and graph[n].busy >= 0 and", "{} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1 del graph[n]", "range(6)] while(len(output) < graph_len): # print(len(output)) # print(len(graph)) for w in range(len(workers)): nodes_to_insert", "open('input.in').readlines())] workers = 5 for e in edges: print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A')", "-1 def insert_source(self, source_id, source): self.inputs[source_id] = source def insert_target(self, target_id, target): self.outputs[target_id]", "print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time =", "nodes_to_insert = [] for node in graph: # print('{} : {} → {}'.format(node,", "graph_len): # print(len(output)) # print(len(graph)) for w in range(len(workers)): nodes_to_insert = [] for", "str({ 'in': 
self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph = {} # for l", "Node(l) for source, target in edges: if source not in graph: graph[source] =", "= {} self.begin_time = 0 self.end_time = -1 self.busy = -1 def insert_source(self,", "before step L can begin. edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A'))", "step L can begin. edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for", "self.id = no self.inputs = {} self.outputs = {} self.begin_time = 0 self.end_time", "self.inputs[source_id] = source def insert_target(self, target_id, target): self.outputs[target_id] = target def __repr__(self): return", "target def __repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph =", "[(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in map(lambda x: x.split(), open('input.in').readlines())]", "range(ord('Z') - ord('A') + 1): # graph[l] = Node(l) for source, target in", "len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node])) if len(graph[node].inputs) == 0: nodes_to_insert.append(node) #print(nodes_to_insert) if", "= [] for node in graph: # print('{} : {} → {}'.format(node, len(graph[node].inputs),", "self.outputs[target_id] = target def __repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]})", "= [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in map(lambda x: x.split(),", "[self.id]}) graph = {} # for l in range(ord('Z') - ord('A') + 1):", "out = graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] =", "target_id, target): self.outputs[target_id] = target def __repr__(self): return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(),", "A must be finished before step L can begin. 
edges = [(ord(x[1]) -", "graph[n].begin_time = time_point graph[n].end_time = time_point + n + 1 + 60 workers[w]", "and workers[w] == -1 and graph[n].busy == -1: print('Assigning {} to worker {}", "-1: print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time", "print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit]", "graph[n].busy >= 0 and w == 0: for k in graph[n].outputs: out =", "for source, target in edges: if source not in graph: graph[source] = Node(source)", "graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert =", "self.outputs = {} self.begin_time = 0 self.end_time = -1 self.busy = -1 def", "= -1 self.busy = -1 def insert_source(self, source_id, source): self.inputs[source_id] = source def", "edges: if source not in graph: graph[source] = Node(source) if target not in", "'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}) graph = {} # for l in", "[] graph_len = len(graph) time_point = 0 workers = [ -1 for i", "while(len(output) < graph_len): # print(len(output)) # print(len(graph)) for w in range(len(workers)): nodes_to_insert =", "print(len(graph)) for w in range(len(workers)): nodes_to_insert = [] for node in graph: #", "time: {} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes = nodes_to_insert[:limit] for", "= Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert = [] graph_len", "graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert = []", "ord(x[7]) - ord('A')) for x in map(lambda x: x.split(), 
open('input.in').readlines())] workers = 5", "-1 for i in range(6)] while(len(output) < graph_len): # print(len(output)) # print(len(graph)) for", "= {} # for l in range(ord('Z') - ord('A') + 1): # graph[l]", "if time_point == graph[n].end_time and graph[n].busy >= 0 and w == 0: for", "w in range(len(workers)): nodes_to_insert = [] for node in graph: # print('{} :", "nodes_to_insert.append(node) #print(nodes_to_insert) if len(nodes_to_insert) == 0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit", "and w == 0: for k in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n]", "= -1 def insert_source(self, source_id, source): self.inputs[source_id] = source def insert_target(self, target_id, target):", "graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert = [] graph_len = len(graph)", "len(graph) time_point = 0 workers = [ -1 for i in range(6)] while(len(output)", "x: x.split(), open('input.in').readlines())] workers = 5 for e in edges: print('{} → {}'.format(chr(ord('A')", "in edges: if source not in graph: graph[source] = Node(source) if target not", "source def insert_target(self, target_id, target): self.outputs[target_id] = target def __repr__(self): return str({ 'in':", "0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes =", "0 and workers[w] == -1 and graph[n].busy == -1: print('Assigning {} to worker", "worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point)) graph[n].begin_time = time_point graph[n].end_time =", "in range(ord('Z') - ord('A') + 1): # graph[l] = Node(l) for source, target", "e[0]),chr(ord('A') + e[1]))) class Node(object): def __init__(self, no): self.id = no self.inputs =", "out.inputs[n] print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1", "self.outputs.keys(), 
'id': [self.id]}) graph = {} # for l in range(ord('Z') - ord('A')", "0: for k in graph[n].outputs: out = graph[n].outputs[k] del out.inputs[n] print(\"Removing {} TP", "output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1 del graph[n] time_point += 1 print('Total", "no self.inputs = {} self.outputs = {} self.begin_time = 0 self.end_time = -1", "== 0: print('Total time: {} .'.format(time_point)) break nodes_to_insert.sort() limit = min(len(workers), len(nodes_to_insert)) processed_nodes", "# print(len(output)) # print(len(graph)) for w in range(len(workers)): nodes_to_insert = [] for node", "processed_nodes = nodes_to_insert[:limit] for n in processed_nodes: if n in graph: if w", "#!/usr/bin/python3 # Example line: Step A must be finished before step L can", "ord('A') + 1): # graph[l] = Node(l) for source, target in edges: if", "+ 1): # graph[l] = Node(l) for source, target in edges: if source", "insert_source(self, source_id, source): self.inputs[source_id] = source def insert_target(self, target_id, target): self.outputs[target_id] = target", "print(len(output)) # print(len(graph)) for w in range(len(workers)): nodes_to_insert = [] for node in", "begin. 
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in map(lambda", "source): self.inputs[source_id] = source def insert_target(self, target_id, target): self.outputs[target_id] = target def __repr__(self):", "if n in graph: if w != 0 and workers[w] == -1 and", "in graph: if w != 0 and workers[w] == -1 and graph[n].busy ==", "in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source]) output = [] nodes_to_insert", "print(\"Removing {} TP {}.\".format(n, time_point)) output.append(n) workers[graph[n].busy] = -1 graph[n].busy = -1 del", "Node(source) if target not in graph: graph[target] = Node(target) graph[source].insert_target(target, graph[target]) graph[target].insert_source(source, graph[source])", "self.begin_time = 0 self.end_time = -1 self.busy = -1 def insert_source(self, source_id, source):", "must be finished before step L can begin. edges = [(ord(x[1]) - ord('A'),", "graph: # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs))) # print('{}: {}'.format(node, graph[node]))" ]
[ "HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000", "get_auth_token() -> str: \"\"\"Obtain a token for API users. **Example request** .. code-block::", "``username`` field. :statuscode 200: No errors. :statuscode 401: Not authenticated. \"\"\" return AuthTokenResponse(token=g.user.generate_auth_token()).json()", "\"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string token: Token string. Use this", "AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a token for", "api from keeper.auth import password_auth from keeper.logutils import log_route from ._models import AuthTokenResponse", "**Example response** .. code-block:: http HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json Date:", "OK Content-Length: 139 Content-Type: application/json Date: Tue, 09 Feb 2016 20:23:11 GMT Server:", "http GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection:", "deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** ..", "Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example", "auth ``username`` field. :statuscode 200: No errors. :statuscode 401: Not authenticated. \"\"\" return", "token in the basic auth ``username`` field. :statuscode 200: No errors. 
:statuscode 401:", "import password_auth from keeper.logutils import log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route()", "gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response**", "code-block:: http HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json Date: Tue, 09 Feb", "the basic auth ``username`` field. :statuscode 200: No errors. :statuscode 401: Not authenticated.", ".. code-block:: http HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json Date: Tue, 09", "basic auth ``username`` field. :statuscode 200: No errors. :statuscode 401: Not authenticated. \"\"\"", "import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a token", "users. **Example request** .. code-block:: http GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip,", "__future__ import annotations from flask import g from flask_accept import accept_fallback from keeper.api", "dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0", "accept_fallback from keeper.api import api from keeper.auth import password_auth from keeper.logutils import log_route", "keeper.logutils import log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token()", "from keeper.auth import password_auth from keeper.logutils import log_route from ._models import AuthTokenResponse @api.route(\"/token\")", "GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive", "a token for API users. **Example request** .. 
code-block:: http GET /token HTTP/1.1", "*/* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3", "in the basic auth ``username`` field. :statuscode 200: No errors. :statuscode 401: Not", "string token: Token string. Use this token in the basic auth ``username`` field.", "@password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a token for API users. **Example request**", "\"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string token: Token string. Use this token", "Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string token:", "\"\"\"Authentication routes.\"\"\" from __future__ import annotations from flask import g from flask_accept import", "token: Token string. Use this token in the basic auth ``username`` field. :statuscode", "token for API users. **Example request** .. code-block:: http GET /token HTTP/1.1 Accept:", "Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization:", "http HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json Date: Tue, 09 Feb 2016", "Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string token: Token", "@api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a token for API", "localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200 OK Content-Length: 139", "application/json Date: Tue, 09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\":", "} :reqheader Authorization: ``username:password`` :>json string token: Token string. Use this token in", "Use this token in the basic auth ``username`` field. 
:statuscode 200: No errors.", "GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string", "from flask_accept import accept_fallback from keeper.api import api from keeper.auth import password_auth from", "keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200 OK", "Tue, 09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" }", "Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block::", "import accept_fallback from keeper.api import api from keeper.auth import password_auth from keeper.logutils import", "20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json", "/token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host:", ":reqheader Authorization: ``username:password`` :>json string token: Token string. Use this token in the", "Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string token: Token string.", ".. code-block:: http GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic", "@log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a token for API users. **Example", "@accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a token for API users.", "Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. 
code-block:: http", "HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json Date: Tue, 09 Feb 2016 20:23:11", "g from flask_accept import accept_fallback from keeper.api import api from keeper.auth import password_auth", "Authorization: ``username:password`` :>json string token: Token string. Use this token in the basic", "from keeper.logutils import log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def", "\"\"\"Obtain a token for API users. **Example request** .. code-block:: http GET /token", "flask_accept import accept_fallback from keeper.api import api from keeper.auth import password_auth from keeper.logutils", "import log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() ->", "string. Use this token in the basic auth ``username`` field. :statuscode 200: No", "def get_auth_token() -> str: \"\"\"Obtain a token for API users. **Example request** ..", "Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200 OK Content-Length:", "HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json", "Date: Tue, 09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\"", "response** .. 
code-block:: http HTTP/1.0 200 OK Content-Length: 139 Content-Type: application/json Date: Tue,", "keeper.api import api from keeper.auth import password_auth from keeper.logutils import log_route from ._models", "09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader", "annotations from flask import g from flask_accept import accept_fallback from keeper.api import api", "import annotations from flask import g from flask_accept import accept_fallback from keeper.api import", "Connection: keep-alive Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200", "139 Content-Type: application/json Date: Tue, 09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0", "code-block:: http GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz", "from __future__ import annotations from flask import g from flask_accept import accept_fallback from", "request** .. code-block:: http GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization:", "Content-Length: 139 Content-Type: application/json Date: Tue, 09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3", "from flask import g from flask_accept import accept_fallback from keeper.api import api from", "from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain", "from keeper.api import api from keeper.auth import password_auth from keeper.logutils import log_route from", "``username:password`` :>json string token: Token string. Use this token in the basic auth", "Content-Type: application/json Date: Tue, 09 Feb 2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 {", "**Example request** .. 
code-block:: http GET /token HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate", "200 OK Content-Length: 139 Content-Type: application/json Date: Tue, 09 Feb 2016 20:23:11 GMT", "this token in the basic auth ``username`` field. :statuscode 200: No errors. :statuscode", "{ \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password`` :>json string token: Token string. Use", "2016 20:23:11 GMT Server: Werkzeug/0.11.3 Python/3.5.0 { \"token\": \"<KEY>...\" } :reqheader Authorization: ``username:password``", "str: \"\"\"Obtain a token for API users. **Example request** .. code-block:: http GET", "Token string. Use this token in the basic auth ``username`` field. :statuscode 200:", "keeper.auth import password_auth from keeper.logutils import log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback", "-> str: \"\"\"Obtain a token for API users. **Example request** .. code-block:: http", "import api from keeper.auth import password_auth from keeper.logutils import log_route from ._models import", "Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic dXNlcjpwYXNz Connection: keep-alive Host: localhost:5000 User-Agent:", "routes.\"\"\" from __future__ import annotations from flask import g from flask_accept import accept_fallback", "import g from flask_accept import accept_fallback from keeper.api import api from keeper.auth import", "User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200 OK Content-Length: 139 Content-Type:", ":>json string token: Token string. Use this token in the basic auth ``username``", "API users. **Example request** .. code-block:: http GET /token HTTP/1.1 Accept: */* Accept-Encoding:", "._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str: \"\"\"Obtain a", "flask import g from flask_accept import accept_fallback from keeper.api import api from keeper.auth", "for API users. 
**Example request** .. code-block:: http GET /token HTTP/1.1 Accept: */*", "password_auth from keeper.logutils import log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required", "log_route from ._models import AuthTokenResponse @api.route(\"/token\") @accept_fallback @log_route() @password_auth.login_required def get_auth_token() -> str:" ]
[ "''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name == 'firefox' def test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( '''", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "SOFTWARE. import os from selene import have from selene.support.shared import browser from tests.integration.helpers.givenpage", "GivenPage empty_page = 'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit() def teardown_function(): browser.config.browser_name =", "software and associated documentation files (the \"Software\"), to deal # in the Software", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "do so, subject to the following conditions: # # The above copyright notice", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "and to permit persons to whom the Software is # furnished to do", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the Software without restriction, including without limitation the rights # to use, copy,", "browser.quit() def teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "the following conditions: # # The above copyright notice and this permission notice", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit() def teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def", "THE # SOFTWARE. 
import os from selene import have from selene.support.shared import browser", "browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( '''", "site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name == 'firefox' def test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body(", "person obtaining a copy # of this software and associated documentation files (the", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "# furnished to do so, subject to the following conditions: # # The", "IN THE # SOFTWARE. import os from selene import have from selene.support.shared import", "the Software, and to permit persons to whom the Software is # furnished", "<filename>tests/acceptance/selene_page_factory_test.py # MIT License # # Copyright (c) 2015-2021 <NAME> # # Permission", "permit persons to whom the Software is # furnished to do so, subject", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os", "<h1 id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox'", "Permission is hereby granted, free of charge, to any person obtaining a copy", "# SOFTWARE. 
import os from selene import have from selene.support.shared import browser from", "browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\"))", "browser from tests.integration.helpers.givenpage import GivenPage empty_page = 'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit()", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "tests.integration.helpers.givenpage import GivenPage empty_page = 'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit() def teardown_function():", "in the Software without restriction, including without limitation the rights # to use,", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "= 'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit() def teardown_function(): browser.config.browser_name = 'chrome' browser.quit()", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "copies of the Software, and to permit persons to whom the Software is", "def teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>'''", "test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene", "# The above copyright notice and this permission notice shall be included in", "included in all # copies or substantial portions of the Software. # #", "OR OTHER DEALINGS IN THE # SOFTWARE. import os from selene import have", "# of this software and associated documentation files (the \"Software\"), to deal #", "to do so, subject to the following conditions: # # The above copyright", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "= 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert", "is hereby granted, free of charge, to any person obtaining a copy #", "above copyright notice and this permission notice shall be included in all #", "selene.support.shared import browser from tests.integration.helpers.givenpage import GivenPage empty_page = 'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def", "persons to whom the Software is # furnished to do so, subject to", "sell # copies of the Software, and to permit persons to whom the", "<NAME> # # Permission is hereby granted, free of 
charge, to any person", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "conditions: # # The above copyright notice and this permission notice shall be", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\"))", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "2015-2021 <NAME> # # Permission is hereby granted, free of charge, to any", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name ==", "test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1> ''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome'", "to permit persons to whom the Software is # furnished to do so,", "'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "selene import have from selene.support.shared import browser from tests.integration.helpers.givenpage import GivenPage empty_page =", ") def setup_function(): browser.quit() def teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page)", "browser.driver.name == 'firefox' def test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1> ''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\"))", "# MIT License # # Copyright (c) 2015-2021 <NAME> # # Permission is", "= 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert", "notice shall be included in all # copies or substantial portions of the", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name == 'firefox' def test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "of charge, to any person obtaining a copy # of this software and", "whom the Software is # furnished to do so, subject to the following", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page)", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
# FITNESS FOR A PARTICULAR PURPOSE", "def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' )", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "setup_function(): browser.quit() def teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( '''", "# # Permission is hereby granted, free of charge, to any person obtaining", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "free of charge, to any person obtaining a copy # of this software", "browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit():", "== 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a>", "shall be included in all # copies or substantial portions of the Software.", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def", "(c) 2015-2021 <NAME> # # Permission is hereby granted, free of charge, to", "The above copyright notice and this permission notice shall be included in all", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "and/or sell # copies of the Software, and to permit persons to whom", "so, subject to the following conditions: # # The above copyright notice and", "this permission notice shall be included in all # copies or substantial portions", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "import os from selene import have from selene.support.shared import browser from tests.integration.helpers.givenpage import", "import have from selene.support.shared import browser from tests.integration.helpers.givenpage import GivenPage empty_page = 'file://{}/../resources/empty.html'.format(", "os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit() def teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit():", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "# Copyright (c) 2015-2021 <NAME> # # Permission is hereby granted, free of", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "# copies or substantial portions of the Software. # # THE SOFTWARE IS", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "# in the Software without restriction, including without limitation the rights # to", "is # furnished to do so, subject to the following conditions: # #", "files (the \"Software\"), to deal # in the Software without restriction, including without", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "teardown_function(): browser.config.browser_name = 'chrome' browser.quit() def test_can_init_default_browser_on_visit(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>''' )", "from tests.integration.helpers.givenpage import GivenPage empty_page = 'file://{}/../resources/empty.html'.format( os.path.abspath(os.path.dirname(__file__)) ) def setup_function(): browser.quit() def", "GivenPage(browser.driver).opened_with_body( ''' <a id=\"selene_link\">Selene site</a> ''' ) browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name == 'firefox'", "copy # of this software and associated documentation files (the \"Software\"), to deal", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import os from selene", ") browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body(", ") browser.element(\"#selene_link\").should(have.exact_text(\"Selene site\")) assert browser.driver.name == 'firefox' def test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "to the following conditions: # # The above copyright notice and this permission", "== 'firefox' def test_can_init_default_browser_after_custom(): browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1> ''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "to deal # in the Software without restriction, including without limitation the rights", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "to any person obtaining a copy # of this software and associated documentation", "os from selene import have from selene.support.shared import browser from tests.integration.helpers.givenpage import GivenPage", "GivenPage(browser.driver).opened_with_body( ''' <h1 id=\"header\">Selene</h1>''' ) browser.element(\"#header\").should(have.exact_text(\"Selene\")) assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "assert browser.driver.name == 'chrome' def test_can_init_custom_browser_on_visit(): browser.config.browser_name = 'firefox' browser.open(empty_page) GivenPage(browser.driver).opened_with_body( ''' <a", "following conditions: # # The above copyright 
# Absolute file:// URL of the empty fixture page that tests open
# to force (re)initialization of the shared browser.
_tests_dir = os.path.abspath(os.path.dirname(__file__))
empty_page = f'file://{_tests_dir}/../resources/empty.html'
def setup_function():
    # Ensure every test starts with no live browser session, so the test
    # itself triggers (re)initialization of the driver on browser.open().
    browser.quit()
def teardown_function():
    # Restore the default browser name and close the driver so that a
    # 'firefox' override made by one test cannot leak into the next one.
    browser.config.browser_name = 'chrome'
    browser.quit()
def test_can_init_default_browser_on_visit():
    """Opening a page with no explicit browser_name boots the default (chrome)."""
    browser.open(empty_page)

    page = GivenPage(browser.driver)
    page.opened_with_body(
        '''
        <h1 id="header">Selene</h1>'''
    )

    header = browser.element("#header")
    header.should(have.exact_text("Selene"))
    assert browser.driver.name == 'chrome'
def test_can_init_custom_browser_on_visit():
    """Setting browser_name before opening a page boots that browser (firefox)."""
    browser.config.browser_name = 'firefox'
    browser.open(empty_page)

    page = GivenPage(browser.driver)
    page.opened_with_body(
        '''
        <a id="selene_link">Selene site</a>
        '''
    )

    link = browser.element("#selene_link")
    link.should(have.exact_text("Selene site"))
    assert browser.driver.name == 'firefox'
def test_can_init_default_browser_after_custom():
    """After teardown resets the config, a fresh open boots the default browser again."""
    browser.open(empty_page)

    page = GivenPage(browser.driver)
    page.opened_with_body(
        '''
        <h1 id="header">Selene</h1>
        '''
    )

    header = browser.element("#header")
    header.should(have.exact_text("Selene"))
    assert browser.driver.name == 'chrome'
def exception(exc_type=None, exc_value=None, exc_traceback=None):
    """Return the formatted traceback string for an exception.

    When no exception triple is supplied, the exception currently being
    handled (``sys.exc_info()``) is formatted instead. Trailing whitespace
    is stripped from the result.
    """
    if exc_type is None and exc_value is None and exc_traceback is None:
        exc_type, exc_value, exc_traceback = sys.exc_info()
    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
    return "".join(lines).rstrip()
\"\"\" if (exc_type, exc_value, exc_traceback) == (None, None,", "exc_type, exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception", "None, None): exc_type, exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception):", "**kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception class.", "self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception class. \"\"\"", "exception class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification", "DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\" def __init__(self,", "tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error", "pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\"", "TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self, tests, *args, **kwargs): self.tests = tests", "TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\" def __init__(self, repeat, retry, *args, **kwargs): self.repeat = repeat", "class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. 
\"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error.", "self.repeat = repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests", "super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self, tests, *args,", "**kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error.", "retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self, tests,", "\"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\" def __init__(self, repeat,", "error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass class ArgumentError(TestFlowsError): \"\"\"Argument error.", "\"\"\"Requirement error. \"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description", "SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass class ArgumentError(TestFlowsError):", "None): exc_type, exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base", "exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy", "= tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception class. 
\"\"\" pass", "**kwargs): self.repeat = repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat", "class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test", "= retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self,", "if (exc_type, exc_value, exc_traceback) == (None, None, None): exc_type, exc_value, exc_traceback = sys.exc_info()", "exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\"", "exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat", "pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass class ArgumentError(TestFlowsError): \"\"\"Argument error. \"\"\" pass", "string. \"\"\" if (exc_type, exc_value, exc_traceback) == (None, None, None): exc_type, exc_value, exc_traceback", "pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass", "\"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\"", "\"\"\"Repeat tests individually. \"\"\" def __init__(self, tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually,", "== (None, None, None): exc_type, exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip()", "RequirementError(TestFlowsError): \"\"\"Requirement error. 
\"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError):", "ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class", "sys import traceback def exception(exc_type=None, exc_value=None, exc_traceback=None): \"\"\"Get exception string. \"\"\" if (exc_type,", "\"\"\"Repeat test. \"\"\" def __init__(self, repeat, retry, *args, **kwargs): self.repeat = repeat self.retry", "def __init__(self, tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException):", "\"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result", "(exc_type, exc_value, exc_traceback) == (None, None, None): exc_type, exc_value, exc_traceback = sys.exc_info() return", "\"\"\"Base error exception class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\" pass class", "(None, None, None): exc_type, exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class", "exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception.", "test. \"\"\" def __init__(self, repeat, retry, *args, **kwargs): self.repeat = repeat self.retry =", "\"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\" def __init__(self, repeat, retry, *args, **kwargs):", "TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. 
\"\"\" pass class", "pass class TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\" def __init__(self, repeat, retry, *args, **kwargs): self.repeat", "self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement", "exc_traceback=None): \"\"\"Get exception string. \"\"\" if (exc_type, exc_value, exc_traceback) == (None, None, None):", "\"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass class ArgumentError(TestFlowsError): \"\"\"Argument error. \"\"\"", "**kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self, tests, *args, **kwargs): self.tests", "tests individually. \"\"\" def __init__(self, tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args,", "\"\"\"Get exception string. \"\"\" if (exc_type, exc_value, exc_traceback) == (None, None, None): exc_type,", "repeat, retry, *args, **kwargs): self.repeat = repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs)", "return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException):", "\"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception.", "exception(exc_type=None, exc_value=None, exc_traceback=None): \"\"\"Get exception string. \"\"\" if (exc_type, exc_value, exc_traceback) == (None,", "__init__(self, repeat, retry, *args, **kwargs): self.repeat = repeat self.retry = retry super(TestIteration, self).__init__(*args,", "class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test. 
\"\"\" def", "\"\"\" def __init__(self, repeat, retry, *args, **kwargs): self.repeat = repeat self.retry = retry", "super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception class. \"\"\" pass class RequirementError(TestFlowsError):", "\"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException):", "retry, *args, **kwargs): self.repeat = repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class", "exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test. \"\"\" def __init__(self, repeat, retry, *args,", "= repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually.", "import traceback def exception(exc_type=None, exc_value=None, exc_traceback=None): \"\"\"Get exception string. \"\"\" if (exc_type, exc_value,", "TestFlowsError(TestFlowsException): \"\"\"Base error exception class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\" pass", "error. \"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error.", "\"\"\" if (exc_type, exc_value, exc_traceback) == (None, None, None): exc_type, exc_value, exc_traceback =", "class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self, tests, *args, **kwargs): self.tests =", "<filename>testflows/_core/exceptions.py<gh_stars>1-10 import sys import traceback def exception(exc_type=None, exc_value=None, exc_traceback=None): \"\"\"Get exception string. \"\"\"", "\"\"\" def __init__(self, tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class", "error exception class. 
\"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\" pass class SpecificationError(TestFlowsError):", "*args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base error exception", "class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass", "repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\"", "class TestFlowsError(TestFlowsException): \"\"\"Base error exception class. \"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\"", "self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def __init__(self, tests, *args, **kwargs):", "self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException): \"\"\"Repeat tests individually. \"\"\" def", "exception string. \"\"\" if (exc_type, exc_value, exc_traceback) == (None, None, None): exc_type, exc_value,", "class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass", "*args, **kwargs): self.repeat = repeat self.retry = retry super(TestIteration, self).__init__(*args, **kwargs) class TestRerunIndividually(TestFlowsException):", "= sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass", "\"\"\"Base exception class. \"\"\" pass class ResultException(TestFlowsException): \"\"\"Result exception. \"\"\" pass class DummyTestException(TestFlowsException):", "class SpecificationError(TestFlowsError): \"\"\"Specification error. 
\"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass class", "def __init__(self, repeat, retry, *args, **kwargs): self.repeat = repeat self.retry = retry super(TestIteration,", "pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\" pass", "sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class. \"\"\" pass class", "\"\"\" pass class RequirementError(TestFlowsError): \"\"\"Requirement error. \"\"\" pass class SpecificationError(TestFlowsError): \"\"\"Specification error. \"\"\"", "exc_value, exc_traceback) == (None, None, None): exc_type, exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type,", "traceback def exception(exc_type=None, exc_value=None, exc_traceback=None): \"\"\"Get exception string. \"\"\" if (exc_type, exc_value, exc_traceback)", "\"\"\" pass class DummyTestException(TestFlowsException): \"\"\"Dummy test exception. \"\"\" pass class TestIteration(TestFlowsException): \"\"\"Repeat test.", "exc_value, exc_traceback = sys.exc_info() return \"\".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip() class TestFlowsException(Exception): \"\"\"Base exception class.", "__init__(self, tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs) class TestFlowsError(TestFlowsException): \"\"\"Base", "individually. \"\"\" def __init__(self, tests, *args, **kwargs): self.tests = tests super(TestRerunIndividually, self).__init__(*args, **kwargs)", "\"\"\"Specification error. \"\"\" pass class DescriptionError(TestFlowsError): \"\"\"Description error. \"\"\" pass class ArgumentError(TestFlowsError): \"\"\"Argument" ]
[ "int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self, account,", "self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e: print e return { self.table_name: ec2_instances", "i.tags if item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM = p['Value'] b =", "= self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e: print", "\"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\":", "\"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\":", "b = next((item for item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if", "if OS is None: OS = 'linux' tenancy = i.placement['Tenancy'] if tenancy =='default':", "\"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\":", "= ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\":", "tenancy =='default': tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT = [] p =", "= schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\":", "i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT = []", "\"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } 
def __init__(self, account, region):", "= [] p = next((item for item in i.tags if item[\"Key\"] == \"PLATFORM\"),", "= [] BUSINESS_UNIT = [] p = next((item for item in i.tags if", "= next((item for item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b:", "table_name = 'scheduler_state_logs' def ec2_def(self, i, schedule): OS = i.platform if OS is", "=='default': tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT = [] p = next((item", "daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\",", "self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')),", "\"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\":", "\"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') }", "\"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\":", "asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs' def ec2_def(self,", "ec2_def(self, i, schedule): OS = i.platform if OS is None: OS = 'linux'", "next((item for item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT", "if p: PLATFORM = p['Value'] b = next((item for item in i.tags if", "\"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = 
','.join(schedule.get('daysActive'))", "int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def", "} def __init__(self, account, region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name) def", "'scheduler_state_logs' def ec2_def(self, i, schedule): OS = i.platform if OS is None: OS", "== \"PLATFORM\"), None) if p: PLATFORM = p['Value'] b = next((item for item", "','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account,", "tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT = [] p = next((item for", "{ \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type,", "__init__(self, account, region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules):", "in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value'] if", "PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'),", "next((item for item in i.tags if item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM", "b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return {", "\"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\":", "i.instance_id, 
\"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy,", "region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances = [] for s", "i, schedule): OS = i.platform if OS is None: OS = 'linux' tenancy", "Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances = [] for s in schedules: i", "class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs' def ec2_def(self, i, schedule): OS", "Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs' def ec2_def(self, i, schedule):", "BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours')", "s in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except", "account, region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances = [] for", "= [] for s in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i,", "schedule): OS = i.platform if OS is None: OS = 'linux' tenancy =", "ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs' def ec2_def(self, i, schedule): OS =", "for item in i.tags if item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM =", "self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances = [] for s in", "\"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, 
\"OperatingSystem\":", "b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive =", "item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive", "= 'shared' PLATFORM = [] BUSINESS_UNIT = [] p = next((item for item", "[] p = next((item for item in i.tags if item[\"Key\"] == \"PLATFORM\"), None)", "= 'linux' tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared' PLATFORM =", "'linux' tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared' PLATFORM = []", "\"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self, account,", "Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances = []", "generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS,", "ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e: print e return {", "item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM = p['Value'] b = next((item for", "in i.tags if item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM = p['Value'] b", "def __init__(self, account, region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self,", "<reponame>asurion/Hibernate from utils.randomGen import generateRandomString from asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name =", "'ec2' table_name = 'scheduler_state_logs' def ec2_def(self, i, schedule): OS = i.platform if OS", "api_name = 'ec2' table_name = 'scheduler_state_logs' def 
ec2_def(self, i, schedule): OS = i.platform", "OS = 'linux' tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared' PLATFORM", "daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self, account, region)", "\"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self, account, region) self.resource", "def generate_rows(self, schedules): ec2_instances = [] for s in schedules: i = self.resource.Instance(s['resource_id'])", "p['Value'] b = next((item for item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None)", "if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive", "for item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT =", "BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive')", "account, region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances", "= next((item for item in i.tags if item[\"Key\"] == \"PLATFORM\"), None) if p:", "schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region,", "PLATFORM = p['Value'] b = next((item for item in i.tags if item[\"Key\"] ==", "in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception", "\"TotalHours\": schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self,", "generate_rows(self, schedules): 
ec2_instances = [] for s in schedules: i = self.resource.Instance(s['resource_id']) try:", "if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return { \"uuid\":", "\"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\":", "self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e: print e", "OS = i.platform if OS is None: OS = 'linux' tenancy = i.placement['Tenancy']", "[] BUSINESS_UNIT = [] p = next((item for item in i.tags if item[\"Key\"]", "= b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return", "\"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\":", "== \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive =", "item in i.tags if item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM = p['Value']", "daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id,", "'shared' PLATFORM = [] BUSINESS_UNIT = [] p = next((item for item in", "[] for s in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s)", "self.api_name) def generate_rows(self, schedules): ec2_instances = [] for s in schedules: i =", "OS is None: OS = 'linux' tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy", "\"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive, \"tz\": 
schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self,", "i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e:", "schedules): ec2_instances = [] for s in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row", "region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances =", "if tenancy =='default': tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT = [] p", "list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\":", "BUSINESS_UNIT = [] p = next((item for item in i.tags if item[\"Key\"] ==", "self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM,", "= 'scheduler_state_logs' def ec2_def(self, i, schedule): OS = i.platform if OS is None:", "import generateRandomString from asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name =", "else: daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\":", "PLATFORM = [] BUSINESS_UNIT = [] p = next((item for item in i.tags", "\"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT,", "p = next((item for item in i.tags if item[\"Key\"] == \"PLATFORM\"), None) if", "from utils.randomGen import generateRandomString from asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2'", "utils.randomGen import generateRandomString from 
asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name", "None) if p: PLATFORM = p['Value'] b = next((item for item in i.tags", "\"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\": i.instance_type, \"OperatingSystem\": OS, \"Tenancy\":", "tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT", "= p['Value'] b = next((item for item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"),", "return { \"uuid\": generateRandomString(16), \"resource_id\": i.instance_id, \"Account\": self.account, \"resource_type\": \"ec2\", \"Region\": self.region, \"InstanceType\":", "is None: OS = 'linux' tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy =", "def ec2_def(self, i, schedule): OS = i.platform if OS is None: OS =", "= i.platform if OS is None: OS = 'linux' tenancy = i.placement['Tenancy'] if", "for s in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row)", "schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self, account, region) self.resource = Connect.resource_connect(self, self.api_name)", "ec2_instances = [] for s in schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row =", "generateRandomString from asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs'", "None: OS = 'linux' tenancy = i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared'", "\"PLATFORM\"), None) if p: PLATFORM = p['Value'] b = next((item for item in", "i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'),", "= i.placement['Tenancy'] if tenancy =='default': tenancy = 'shared' PLATFORM = [] BUSINESS_UNIT =", 
"try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e: print e return", "from asyncProducerUtil.utils.connect import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs' def", "i.platform if OS is None: OS = 'linux' tenancy = i.placement['Tenancy'] if tenancy", "import Connect class ElasticComputeDefinition(Connect): api_name = 'ec2' table_name = 'scheduler_state_logs' def ec2_def(self, i,", "schedules: i = self.resource.Instance(s['resource_id']) try: ec2_table_row = self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as", "OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1,", "item in i.tags if item[\"Key\"] == \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value']", "i.instance_type, \"OperatingSystem\": OS, \"Tenancy\": tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')),", "None) if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else:", "s) ec2_instances.append(ec2_table_row) except Exception as e: print e return { self.table_name: ec2_instances }", "p: PLATFORM = p['Value'] b = next((item for item in i.tags if item[\"Key\"]", "= 'ec2' table_name = 'scheduler_state_logs' def ec2_def(self, i, schedule): OS = i.platform if", "if item[\"Key\"] == \"PLATFORM\"), None) if p: PLATFORM = p['Value'] b = next((item", "1, \"schedule\": daysActive, \"tz\": schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self,", "= Connect.resource_connect(self, self.api_name) def generate_rows(self, schedules): ec2_instances = [] for s in schedules:", "if item[\"Key\"] 
== \"BUSINESS_UNIT\"), None) if b: BUSINESS_UNIT = b['Value'] if isinstance(schedule.get('daysActive'), list):", "= self.ec2_def(i, s) ec2_instances.append(ec2_table_row) except Exception as e: print e return { self.table_name:", "tenancy, \"PLATFORM\": PLATFORM, \"BUSINESS_UNIT\": BUSINESS_UNIT, \"StopTime\": int(schedule.get('stop_time')), \"StartTime\": int(schedule.get('start_time')), \"instance_count\": 1, \"schedule\": daysActive,", "isinstance(schedule.get('daysActive'), list): daysActive = ','.join(schedule.get('daysActive')) else: daysActive = schedule.get('daysActive') return { \"uuid\": generateRandomString(16),", "schedule.get('tz'), \"TotalHours\": schedule.get('TotalHours') } def __init__(self, account, region): Connect.__init__(self, account, region) self.resource =" ]
[ "in a single cell and finding their average value, compared to the value", "an LLS is.\"\"\" if num == None: num = self.NumLos #Get some random", "ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed to", "large #compared to the grid size. return cofm def check_mean(self): \"\"\"Compute difference between", "want to compare to a quasar survey with pixels large #compared to the", "= self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm =", "right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab =", "self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num = None): \"\"\"Find", "axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None): \"\"\"Find a bunch", "__init__(self,num, base, numlos=5000, res = 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"):", "(grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls =", "np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab))", "self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis,", "#Load grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for", "get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions known to be", "print(\"Found DLAs\") def get_cofm(self, 
num = None): \"\"\"Find a bunch of sightline positions", "os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation snapshot\"\"\" def", "numlos=5000, res = 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos", "\"\"\"Load the positions of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions", "spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of DLAs", "spectra from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res = 1., cdir =", "\"\"\"Class to generate spectra in the positions where there is a DLA, as", "np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2]", "snapshot\"\"\" def __init__(self,num, base, numlos=5000, res = 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\",", "dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab", "#All through y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile,", "(ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra):", "of the spectra in this cell and the grid value.\"\"\" dlaval = self.dlaval[self.index][0]", "self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile =", "as known from the grid generation.\"\"\" from __future__ import 
print_function import numpy as", "random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell", "return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral generation", "dla=True): \"\"\"Load the values of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS", "loading several sightlines in a single cell and finding their average value, compared", "through y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed", "1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines", "gridfile, dla=True): \"\"\"Load the values of DLAs or LLS from savefile\"\"\" #Load the", "for repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,", "None): \"\"\"Find a bunch of sightline positions through a single cell containing a", "positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed)", "zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This", "through the same cell, in rare cases. 
#This is only a problem if", "the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed to make the", "cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being through the same cell,", "y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for", "you want to compare to a quasar survey with pixels large #compared to", "to make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls =", "-*- coding: utf-8 -*- \"\"\"Class to generate spectra in the positions where there", "grid generation.\"\"\" from __future__ import print_function import numpy as np import hdfsim import", "compare to a quasar survey with pixels large #compared to the grid size.", "savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All", "= 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push", "push lines through them f = hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close()", "#compared to the grid size. 
return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the", "positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This", "np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some", "cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self,", "positions where there is a DLA, as known from the grid generation.\"\"\" from", "dimensions right if dla: nhi = np.array(grp[\"DLA_val\"]) else: nhi = np.array(grp[\"LLS_val\"]) f.close() return", "specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load", "base, numlos=5000, res = 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load", "positions of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"]", "def get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions known to", "dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num,", "grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability", "None: num = self.NumLos #Get a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm", "y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval", 
"f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is", "as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation snapshot\"\"\" def __init__(self,num,", "pixels large #compared to the grid size. return cofm def check_mean(self): \"\"\"Compute difference", "needed to make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls", "single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a", "spectra in the positions where there is a DLA, as known from the", "single cell and finding their average value, compared to the value in the", "cell.\"\"\" def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\",", "in rare cases. #This is only a problem if you want to compare", "positions through a single cell containing a DLA.\"\"\" if num == None: num", "rare cases. #This is only a problem if you want to compare to", "self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base,", "between the mean column of the spectra in this cell and the grid", "f.close() if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos =", "if you want to compare to a quasar survey with pixels large #compared", "colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self,", "quasar survey with pixels large #compared to the grid size. 
return cofm def", "the dimensions right if dla: nhi = np.array(grp[\"DLA_val\"]) else: nhi = np.array(grp[\"LLS_val\"]) f.close()", "cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None): \"\"\"Find a", "num = None): \"\"\"Find a bunch of sightline positions through a single cell", "the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close()", "of sightline positions through a single cell containing a DLA.\"\"\" if num ==", "or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This", "= hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir", "h5py import vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra", "np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def", "res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\")", "import h5py import vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line", "= self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile,", "= (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls", "DLA or an LLS is.\"\"\" if num == None: num = self.NumLos #Get", "the grid size. 
return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of", "cell and the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval =", "in the positions where there is a DLA, as known from the grid", "their average value, compared to the value in the cell.\"\"\" def __init__(self,num, base,", "cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17)", "None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All through y", "+= self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being through the", "#All through y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile)", "survey with pixels large #compared to the grid size. 
return cofm def _load_dla_index(self,", "be where a DLA or an LLS is.\"\"\" if num == None: num", "where a DLA or an LLS is.\"\"\" if num == None: num =", "a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within", "to the value in the cell.\"\"\" def __init__(self,num, base, numlos=5000, res = 1.,", "= np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5)", "a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up", "(ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls)", "of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid", "size. 
return cofm def check_mean(self): \"\"\"Compute difference between the mean column of the", "1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed to make the dimensions right ind", "base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0'))", "<filename>gridspectra.py # -*- coding: utf-8 -*- \"\"\"Class to generate spectra in the positions", "to push lines through them f = hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"]", "mean column of the spectra in this cell and the grid value.\"\"\" dlaval", "generation code by loading several sightlines in a single cell and finding their", "for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,", "spectra in this cell and the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden =", "0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile", "grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval)", "this cell and the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval", "#Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir,", "from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz =", "savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed to", "savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through them f = hdfsim.get_file(num,", "numlos #All through y axis axis = 
np.ones(self.NumLos) #Load grid positions self.dlaind =", "of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp =", "repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True)", "_load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of DLAs or LLS from savefile\"\"\" #Load", "needed to make the dimensions right if dla: nhi = np.array(grp[\"DLA_val\"]) else: nhi", "print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of", "reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num", "if num == None: num = self.NumLos #Get a single random position self.index", "gridfile, dla=False): \"\"\"Load the positions of DLAs or LLS from savefile\"\"\" #Load the", "box. index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1]", "savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through them f = hdfsim.get_file(num, base,", "= (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab =", "thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num = None): \"\"\"Find a bunch of sightline", "value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From", "some random indices into the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize", "in this cell and the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1)", "a single cell and finding their average value, compared to the value in", "= np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5)", "= np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed to make", "np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral generation code", "np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some", "being through the same cell, in rare cases. #This is only a problem", "grid size. 
return cofm def check_mean(self): \"\"\"Compute difference between the mean column of", "vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else:", "numlos=5000, res = 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos", "__future__ import print_function import numpy as np import hdfsim import h5py import vw_spectra", "several sightlines in a single cell and finding their average value, compared to", "through y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile, dla)", "self.NumLos #Get some random indices into the box. index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm =", "return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of DLAs or LLS", "np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm =", "problem if you want to compare to a quasar survey with pixels large", "yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls", "(ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class", "to make the dimensions right if dla: nhi = np.array(grp[\"DLA_val\"]) else: nhi =", "#Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm()", "#This is only a problem if you want to compare to a quasar", "from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res = 1., cdir = None,", "through a single cell containing a DLA.\"\"\" if num == None: num =", 
"self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being through the same cell, in rare", "f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos", "class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000,", "print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of DLAs or LLS from", "check_mean(self): \"\"\"Compute difference between the mean column of the spectra in this cell", "numpy as np import hdfsim import h5py import vw_spectra import os.path as path", "vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral generation code by loading several sightlines", "np import hdfsim import h5py import vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra):", "vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None):", "base, numlos=5000, res = 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load", "sightline positions known to be where a DLA or an LLS is.\"\"\" if", "f[\"abslists\"] #This is needed to make the dimensions right if dla: nhi =", "num = None): \"\"\"Find a bunch of sightline positions known to be where", "= self.NumLos #Get a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T", "#This is needed to make the dimensions right if dla: nhi = np.array(grp[\"DLA_val\"])", "if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def 
get_cofm(self, num =", "specialised class tests the spectral generation code by loading several sightlines in a", "zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests", "\"\"\"Find a bunch of sightline positions known to be where a DLA or", "import print_function import numpy as np import hdfsim import h5py import vw_spectra import", "bunch of sightline positions known to be where a DLA or an LLS", "#Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp", "if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz", "1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines", "as np import hdfsim import h5py import vw_spectra import os.path as path class", "self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1] +=", "zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class", "= np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5)", "#This is needed to make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not", "make the dimensions right if dla: nhi = np.array(grp[\"DLA_val\"]) else: nhi = np.array(grp[\"LLS_val\"])", "cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, 
axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla:", "LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz", "savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0]", "cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] +=", "\"\"\"Generate metal line spectra from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res =", "class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral generation code by loading", "get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions through a single", "column of the spectra in this cell and the grid value.\"\"\" dlaval =", "line spectra from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res = 1., cdir", "TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral generation code by loading several", "through them f = hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir", "= np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla)", "large #compared to the grid size. 
return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load", "grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed", "yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab", "cell containing a DLA.\"\"\" if num == None: num = self.NumLos #Get a", "a DLA or an LLS is.\"\"\" if num == None: num = self.NumLos", "to the grid size. return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions", "there is a DLA, as known from the grid generation.\"\"\" from __future__ import", "gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through them f = hdfsim.get_file(num, base, 0)", "from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed", "spectral generation code by loading several sightlines in a single cell and finding", "from the grid generation.\"\"\" from __future__ import print_function import numpy as np import", "_load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of DLAs or LLS from savefile\"\"\" #Load", "a single cell containing a DLA.\"\"\" if num == None: num = self.NumLos", "zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral generation code by", "(ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz", "cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being through", "the values of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r')", "= None): \"\"\"Find a bunch of sightline positions through a single cell containing", "self.NumLos = numlos #All through y axis axis 
= np.ones(self.NumLos) #Load grid positions", "+= self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being through the same cell, in", "value in the cell.\"\"\" def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir =", "dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num = None):", "is needed to make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla:", "DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid =", "pixels large #compared to the grid size. return cofm def _load_dla_index(self, gridfile, dla=False):", "the spectral generation code by loading several sightlines in a single cell and", "seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through", "dla) #Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res,", "def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None,", "axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23)", "= self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos,", "self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm()", "#Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed to make", "grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm() 
vw_spectra.VWSpectra.__init__(self,num,", "= path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All through y axis axis", "f[\"abslists\"] #This is needed to make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if", "to compare to a quasar survey with pixels large #compared to the grid", "print_function import numpy as np import hdfsim import h5py import vw_spectra import os.path", "from __future__ import print_function import numpy as np import hdfsim import h5py import", "same cell, in rare cases. #This is only a problem if you want", "survey with pixels large #compared to the grid size. return cofm def check_mean(self):", "values of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp", "lines through them f = hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if", "num = self.NumLos #Get some random indices into the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num)", "cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None): \"\"\"Find a bunch of sightline", "known to be where a DLA or an LLS is.\"\"\" if num ==", "np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values", "them f = hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir ==", "num = self.NumLos #Get a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm =", "the positions of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r')", "or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"])", "the mean column of the spectra in this cell and the grid value.\"\"\"", "the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From", "else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num = None): \"\"\"Find a bunch", "could end up being through the same cell, in rare cases. #This is", "GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res", "yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return", "base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None): \"\"\"Find", "end up being through the same cell, in rare cases. 
#This is only", "= 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push", "with pixels large #compared to the grid size. return cofm def _load_dla_index(self, gridfile,", "not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab", "the spectra in this cell and the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden", "self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num =", "size. return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of DLAs or", "savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions", "= f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile)", "if num == None: num = self.NumLos #Get some random indices into the", "up being through the same cell, in rare cases. #This is only a", "the positions where there is a DLA, as known from the grid generation.\"\"\"", "of sightline positions known to be where a DLA or an LLS is.\"\"\"", "index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1] +=", "cell, in rare cases. 
#This is only a problem if you want to", "is.\"\"\" if num == None: num = self.NumLos #Get some random indices into", "cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of DLAs or LLS from", "cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] +=", "# -*- coding: utf-8 -*- \"\"\"Class to generate spectra in the positions where", "dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab =", "#compared to the grid size. return cofm def check_mean(self): \"\"\"Compute difference between the", "the grid size. return cofm def check_mean(self): \"\"\"Compute difference between the mean column", "bunch of sightline positions through a single cell containing a DLA.\"\"\" if num", "is only a problem if you want to compare to a quasar survey", "and the grid value.\"\"\" dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos", "\"\"\"Find a bunch of sightline positions through a single cell containing a DLA.\"\"\"", "dlaval = self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval)", "= np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the", "= (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised", "#Get a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T 
#Randomize positions", "generation.\"\"\" from __future__ import print_function import numpy as np import hdfsim import h5py", "f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz", "sightlines could end up being through the same cell, in rare cases. #This", "= np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm", "path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation snapshot\"\"\" def __init__(self,num, base,", "= None): \"\"\"Find a bunch of sightline positions known to be where a", "average value, compared to the value in the cell.\"\"\" def __init__(self,num, base, numlos=5000,", "= self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis,", "__init__(self,num, base, numlos=5000, res = 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"):", "self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3)", "repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True)", "= self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num", "num == None: num = self.NumLos #Get a single random position self.index =", "to be where a DLA or an LLS is.\"\"\" if num == None:", "is needed to make the dimensions right if dla: nhi = np.array(grp[\"DLA_val\"]) else:", "into the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a", "= self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm,", "= (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls =", "a DLA, as known from the grid generation.\"\"\" from __future__ import print_function import", "= 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed to make the dimensions right", "None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through them f", "savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All through y axis", "grp = f[\"abslists\"] #This is needed to make the dimensions right if dla:", "path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All through y axis axis =", "self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed to make the dimensions", "#Load halos to push lines through them f = hdfsim.get_file(num, base, 0) self.box", "axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval =", "coding: utf-8 -*- \"\"\"Class to generate spectra in the positions where there is", "random indices into the box. index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions", "def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of DLAs or LLS from savefile\"\"\"", "a quasar survey with pixels large #compared to the grid size. 
return cofm", "def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of DLAs or LLS from savefile\"\"\"", "import hdfsim import h5py import vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate", "num == None: num = self.NumLos #Get some random indices into the box.", "= (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra,", "grid size. return cofm def _load_dla_index(self, gridfile, dla=False): \"\"\"Load the positions of DLAs", "positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base,", "savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self,", "np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed", "only a problem if you want to compare to a quasar survey with", "\"\"\"Load the values of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions", "DLA.\"\"\" if num == None: num = self.NumLos #Get a single random position", "== None: num = self.NumLos #Get a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int)", "indices into the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within", "= None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through them", "ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz", "#Re-seed for repeatability np.random.seed(seed) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir,", "res, cdir, savefile=savefile,savedir=savedir, reload_file=True) def get_cofm(self, num = None): \"\"\"Find a bunch of", "def check_mean(self): \"\"\"Compute difference between the mean column of the spectra in this", "res = 1., seed=23,cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to", "np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2]", "self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile, dla) #Re-seed for repeatability np.random.seed(seed) cofm", "None): \"\"\"Find a bunch of sightline positions known to be where a DLA", "\"\"\"Compute difference between the mean column of the spectra in this cell and", "within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end", "value, compared to the value in the cell.\"\"\" def __init__(self,num, base, numlos=5000, res", "= self.NumLos #Get some random indices into the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm", "LLS is.\"\"\" if num == None: num = self.NumLos #Get some random indices", "cell and finding their average value, compared to the value in the cell.\"\"\"", "f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed to make the dimensions right if", "= f[\"abslists\"] #This is needed to make the dimensions right if dla: nhi", "None: num = self.NumLos #Get some random indices into the box. index =", "reload_file=True) def get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions through", "base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos,", "positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could", "or an LLS is.\"\"\" if num == None: num = self.NumLos #Get some", "#Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines", "= self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval)", "compared to the value in the cell.\"\"\" def __init__(self,num, base, numlos=5000, res =", "= path.join(savedir,gridfile) self.NumLos = numlos #All through y axis axis = np.ones(self.NumLos) #Load", "gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All through y axis axis = np.ones(self.NumLos)", "(grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab =", "ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz 
yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls)", "position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1]", "positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed to make the dimensions right", "to a quasar survey with pixels large #compared to the grid size. return", "the cell.\"\"\" def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir = None, dla=True,", "halos to push lines through them f = hdfsim.get_file(num, base, 0) self.box =", "to the grid size. return cofm def check_mean(self): \"\"\"Compute difference between the mean", "dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through them f =", "= np.append(yslab,yslab_lls) zslab = (ind[2]+0.5)*self.celsz zslab_lls = (ind_lls[2]+0.5)*self.celsz zslab = np.append(zslab,zslab_lls) return np.array((yslab,", "cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def", "sightline positions through a single cell containing a DLA.\"\"\" if num == None:", "the same cell, in rare cases. 
#This is only a problem if you", "\"\"\"This specialised class tests the spectral generation code by loading several sightlines in", "code by loading several sightlines in a single cell and finding their average", "res = 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to", "single cell containing a DLA.\"\"\" if num == None: num = self.NumLos #Get", "a DLA.\"\"\" if num == None: num = self.NumLos #Get a single random", "print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of DLAs or", "self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm,", "cases. #This is only a problem if you want to compare to a", "hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None: savedir =", "dla=False): \"\"\"Load the positions of DLAs or LLS from savefile\"\"\" #Load the DLA/LLS", "difference between the mean column of the spectra in this cell and the", "import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation snapshot\"\"\"", "DLAs\") def get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions known", "LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is", "= numlos #All through y axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind", "#Some sightlines could end up being through the same cell, in rare cases.", "the grid generation.\"\"\" from __future__ import print_function import numpy as np import hdfsim", "def __init__(self,num, base, numlos=5000, res = 1., cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None,", "simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res = 1., cdir = None, 
dla=True,", "path.join(savedir,gridfile) self.NumLos = numlos #All through y axis axis = np.ones(self.NumLos) #Load grid", "self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being through the same", "with pixels large #compared to the grid size. return cofm def check_mean(self): \"\"\"Compute", "np.append(zslab,zslab_lls) return np.array((yslab, zslab)) class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra): \"\"\"This specialised class tests the spectral", "containing a DLA.\"\"\" if num == None: num = self.NumLos #Get a single", "grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the values of DLAs or LLS", "np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"] #This is needed to make the", "-*- \"\"\"Class to generate spectra in the positions where there is a DLA,", "a problem if you want to compare to a quasar survey with pixels", "metal line spectra from simulation snapshot\"\"\" def __init__(self,num, base, numlos=5000, res = 1.,", "class tests the spectral generation code by loading several sightlines in a single", "the value in the cell.\"\"\" def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir", "thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num = None): \"\"\"Find a", "generate spectra in the positions where there is a DLA, as known from", "def get_cofm(self, num = None): \"\"\"Find a bunch of sightline positions through a", "self._load_dla_index(gridfile) #Re-seed for repeatability np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res,", "in the cell.\"\"\" def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir = None,", "#Get some random indices into the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T", "finding their average value, compared to the value in the cell.\"\"\" def __init__(self,num,", "utf-8 -*- \"\"\"Class to generate spectra in the positions where there is a", "import numpy as np import hdfsim import h5py import vw_spectra import os.path as", "positions known to be where a DLA or an LLS is.\"\"\" if num", "self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found DLAs\") def get_cofm(self, num = None): \"\"\"Find a bunch of", "cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5) #Some sightlines could end up being", "DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp = f[\"abslists\"]", "cofm def check_mean(self): \"\"\"Compute difference between the mean column of the spectra in", "the box. 
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num) cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T #Randomize positions within a cell", "to generate spectra in the positions where there is a DLA, as known", "make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:]) if not dla: ind_lls = (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:])", "is a DLA, as known from the grid generation.\"\"\" from __future__ import print_function", "= np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize positions within a cell cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5) cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5)", "self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True):", "hdfsim import h5py import vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal", "= f[\"abslists\"] #This is needed to make the dimensions right ind = (grp[\"DLA\"][0,:],grp[\"DLA\"][1,:],grp[\"DLA\"][2,:])", "== None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos #All through", "a bunch of sightline positions through a single cell containing a DLA.\"\"\" if", "grp = f[\"abslists\"] #This is needed to make the dimensions right ind =", "np.random.seed(23) cofm = self.get_cofm() vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if", "self.NumLos #Get a single random position self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int) cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T #Randomize", "by loading several 
sightlines in a single cell and finding their average value,", "DLAs or LLS from savefile\"\"\" #Load the DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"]", "if savedir == None: savedir = path.join(base,\"snapdir_\"+str(num).rjust(3,'0')) gridfile = path.join(savedir,gridfile) self.NumLos = numlos", "vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from simulation", "DLA, as known from the grid generation.\"\"\" from __future__ import print_function import numpy", "return cofm def check_mean(self): \"\"\"Compute difference between the mean column of the spectra", "and finding their average value, compared to the value in the cell.\"\"\" def", "f = hdfsim.get_file(num, base, 0) self.box = f[\"Header\"].attrs[\"BoxSize\"] f.close() if savedir == None:", "a bunch of sightline positions known to be where a DLA or an", "sightlines in a single cell and finding their average value, compared to the", "axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True) if dla: self.replace_not_DLA(ndla=numlos, thresh=10**20.3) else: self.replace_not_DLA(ndla=numlos, thresh=10**17) print(\"Found", "right if dla: nhi = np.array(grp[\"DLA_val\"]) else: nhi = np.array(grp[\"LLS_val\"]) f.close() return nhi", "axis axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile) #Re-seed for repeatability", "the DLA/LLS positions f=h5py.File(gridfile,'r') grid_file=f[\"HaloData\"] ngrid = np.array(grid_file[\"ngrid\"]) self.celsz = 1.*self.box/ngrid[0] grp =", "import vw_spectra import os.path as path class GridSpectra(vw_spectra.VWSpectra): \"\"\"Generate metal line spectra from", "== None: num = self.NumLos #Get some random indices into the box. 
index", "DLA/LLS positions f=h5py.File(gridfile,'r') grp = f[\"abslists\"] #This is needed to make the dimensions", "self.dlaval[self.index][0] colden = self.get_col_density(\"H\",1) specval = np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def", "cdir = None, dla=True, savefile=\"grid_spectra_DLA.hdf5\", savedir=None, gridfile=\"boxhi_grid_H2.hdf5\"): #Load halos to push lines through", "known from the grid generation.\"\"\" from __future__ import print_function import numpy as np", "= np.sum(colden)/self.NumLos print(\"From spectra:\",specval) print(\"From grid:\",10**dlaval) print(\"different:\",specval/10**dlaval) def _load_dla_val(self, gridfile, dla=True): \"\"\"Load the", "tests the spectral generation code by loading several sightlines in a single cell", "axis = np.ones(self.NumLos) #Load grid positions self.dlaind = self._load_dla_index(gridfile, dla) self.dlaval = self._load_dla_val(gridfile,", "where there is a DLA, as known from the grid generation.\"\"\" from __future__", "= (grp[\"LLS\"][0,:],grp[\"LLS\"][1,:],grp[\"LLS\"][2,:]) f.close() yslab = (ind[1]+0.5)*self.celsz yslab_lls = (ind_lls[1]+0.5)*self.celsz yslab = np.append(yslab,yslab_lls) zslab" ]
[ "main(args): # Make data demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) # Build/train", "# Make data demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) # Build/train network", "parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int, required=False ) parser.add_argument( \"--data_latent_dimension\", help=\"Latent", "import get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse(): parser = argparse.ArgumentParser( prog=\"Testing", ") parser.add_argument( \"--num_pc\", help=\"Number of principle components to extract\", default=2, type=int, required=False )", "random data\" ) parser.add_argument( \"--num_samples\", help=\"Number of samples for synthetic data\", default=25, type=int,", "# Build/train network real_princomps = get_top_princomps(demo_data, args.num_pc) hebb_princomps = np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps,", "np from demo_utils import get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse(): parser", "Build/train network hah_network = MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) #", "princomp extraction from CLI \"\"\" import argparse import os import numpy as np", "components\", description=\"Testing HebbNet principal components by decomposing random data\" ) parser.add_argument( \"--num_samples\", help=\"Number", "= np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return V[_idx, :].T def main(args): # Make data", "required=False ) parser.add_argument( \"--num_pc\", help=\"Number of principle components to extract\", default=2, type=int, required=False", "args.data_dimension, [args.num_pc], 
has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) # Build/train network real_princomps = get_top_princomps(demo_data,", "import argparse import os import numpy as np from demo_utils import get_random_data from", "= np.array_str( inner_prod_mat, suppress_small=True, precision=4 ) print(np.array_str(inner_prod_mat, precision=4)) if __name__ == \"__main__\": args", "def get_top_princomps(data_array, num_pcs): U, S, V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return V[_idx,", "hah_network = MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) # Build/train network", "= np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the inner product of", ":].T def main(args): # Make data demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension )", "data demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) # Build/train network hah_network =", "top two PCs with learned input weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str(", "by decomposing random data\" ) parser.add_argument( \"--num_samples\", help=\"Number of samples for synthetic data\",", "np.set_printoptions(suppress=True) def _argparse(): parser = argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet principal", "prod_as_string = np.array_str( inner_prod_mat, suppress_small=True, precision=4 ) print(np.array_str(inner_prod_mat, precision=4)) if __name__ == \"__main__\":", "np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return V[_idx, :].T def main(args): # Make data demo_data", "type=int, required=False ) return parser.parse_args() def get_top_princomps(data_array, num_pcs): U, S, V = 
np.linalg.svd(np.array(data_array))", "MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse(): parser = argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet", "= argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet principal components by decomposing random", "prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet principal components by decomposing random data\" )", "data\", default=25, type=int, required=False ) parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int,", "principal components by decomposing random data\" ) parser.add_argument( \"--num_samples\", help=\"Number of samples for", "principal components\", description=\"Testing HebbNet principal components by decomposing random data\" ) parser.add_argument( \"--num_samples\",", "def _argparse(): parser = argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet principal components", "return parser.parse_args() def get_top_princomps(data_array, num_pcs): U, S, V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:]", "real_princomps = get_top_princomps(demo_data, args.num_pc) hebb_princomps = np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) #", "\"\"\" import argparse import os import numpy as np from demo_utils import get_random_data", "help=\"Number of principle components to extract\", default=2, type=int, required=False ) return parser.parse_args() def", "parser.parse_args() def get_top_princomps(data_array, num_pcs): U, S, V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return", "two PCs with learned input weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str( inner_prod_mat,", "has_bias=False, act_type='linear', ) 
hah_network.train(demo_data, num_epochs=1000) # Build/train network real_princomps = get_top_princomps(demo_data, args.num_pc) hebb_princomps", "hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the inner product of top two", "_argparse(): parser = argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet principal components by", "\"--num_pc\", help=\"Number of principle components to extract\", default=2, type=int, required=False ) return parser.parse_args()", "S, V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return V[_idx, :].T def main(args): #", "type=int, required=False ) parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int, required=False )", "help=\"Dimension of synthetic data\", default=100, type=int, required=False ) parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension of", "U, S, V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return V[_idx, :].T def main(args):", "argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing HebbNet principal components by decomposing random data\"", "type=int, required=False ) parser.add_argument( \"--num_pc\", help=\"Number of principle components to extract\", default=2, type=int,", "get_top_princomps(demo_data, args.num_pc) hebb_princomps = np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the", ") parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int, required=False ) parser.add_argument( \"--data_latent_dimension\",", "get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) # Build/train network hah_network = MultilayerHahNetwork( args.data_dimension, [args.num_pc],", "= get_top_princomps(demo_data, args.num_pc) hebb_princomps = 
np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show", "inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str( inner_prod_mat, suppress_small=True, precision=4 ) print(np.array_str(inner_prod_mat, precision=4)) if", "the inner product of top two PCs with learned input weights inner_prod_mat =", "/= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the inner product of top two PCs", "of top two PCs with learned input weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string =", "np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the inner product of top two PCs with", "type=int, required=False ) parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension of synthetic data\", default=3, type=int, required=False", "parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension of synthetic data\", default=3, type=int, required=False ) parser.add_argument( \"--num_pc\",", "demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) # Build/train network hah_network = MultilayerHahNetwork(", "required=False ) parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int, required=False ) parser.add_argument(", "PCs with learned input weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str( inner_prod_mat, suppress_small=True,", "_idx = np.argsort(S)[-num_pcs:] return V[_idx, :].T def main(args): # Make data demo_data =", "components by decomposing random data\" ) parser.add_argument( \"--num_samples\", help=\"Number of samples for synthetic", "default=2, type=int, required=False ) return parser.parse_args() def get_top_princomps(data_array, num_pcs): U, S, V =", "data\", default=3, type=int, required=False ) parser.add_argument( \"--num_pc\", help=\"Number of principle 
components to extract\",", "axis=0, keepdims=True) # Show the inner product of top two PCs with learned", "extraction from CLI \"\"\" import argparse import os import numpy as np from", "from demo_utils import get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse(): parser =", "import MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse(): parser = argparse.ArgumentParser( prog=\"Testing HebbNet principal components\", description=\"Testing", "of principle components to extract\", default=2, type=int, required=False ) return parser.parse_args() def get_top_princomps(data_array,", "components to extract\", default=2, type=int, required=False ) return parser.parse_args() def get_top_princomps(data_array, num_pcs): U,", "numpy as np from demo_utils import get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True) def", "network hah_network = MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) # Build/train", "HebbNet principal components\", description=\"Testing HebbNet principal components by decomposing random data\" ) parser.add_argument(", "return V[_idx, :].T def main(args): # Make data demo_data = get_random_data( args.num_samples, args.data_dimension,", "\"\"\" Test princomp extraction from CLI \"\"\" import argparse import os import numpy", "extract\", default=2, type=int, required=False ) return parser.parse_args() def get_top_princomps(data_array, num_pcs): U, S, V", "def main(args): # Make data demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) #", "\"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int, required=False ) parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension", "V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] 
def get_top_princomps(data_array, num_pcs):
    """Compute the leading principal directions of a data matrix via SVD.

    :param data_array: 2-D array-like of shape (num_samples, num_features)
    :param num_pcs: number of leading principal components to return
    :return: array of shape (num_features, num_pcs) whose columns are the
        principal directions, in DESCENDING order of singular value
    """
    _, sing_vals, vt = np.linalg.svd(np.array(data_array))
    # np.linalg.svd returns the singular values (and the matching rows of
    # V^T) already sorted in descending order, so the leading components
    # are simply the first `num_pcs` rows of V^T.
    # BUG FIX: the original `np.argsort(S)[-num_pcs:]` picked the same rows
    # but yielded them in ASCENDING-variance order, reversing the columns
    # of the result.
    return vt[:num_pcs].T
def main(args):
    """Train a multilayer HAH network on random low-rank data and compare
    its learned input weights against the data's true principal components.

    :param args: argparse.Namespace carrying num_samples, data_dimension,
        data_latent_dimension and num_pc attributes.
    """
    # Make data
    demo_data = get_random_data(
        args.num_samples,
        args.data_dimension,
        latent_dim=args.data_latent_dimension
    )

    # Build/train network
    hah_network = MultilayerHahNetwork(
        args.data_dimension,
        [args.num_pc],
        has_bias=False,
        act_type='linear',
    )
    hah_network.train(demo_data, num_epochs=1000)

    # Compare learned weights with the true principal components
    # (unit-normalize the learned weights column-wise first).
    real_princomps = get_top_princomps(demo_data, args.num_pc)
    hebb_princomps = np.squeeze(hah_network.layers[0].input_weights)
    hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True)

    # Show the inner product of top PCs with learned input weights.
    # BUG FIX: ndarrays have no `.matmul` method (AttributeError at
    # runtime) — use the `@` matrix-multiplication operator instead.
    inner_prod_mat = real_princomps.T @ hebb_princomps
    prod_as_string = np.array_str(
        inner_prod_mat,
        suppress_small=True,
        precision=4
    )
    # BUG FIX: print the string that was actually built; the original
    # discarded `prod_as_string` and re-formatted without suppress_small.
    print(prod_as_string)
principal", "= np.argsort(S)[-num_pcs:] return V[_idx, :].T def main(args): # Make data demo_data = get_random_data(", "[args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) # Build/train network real_princomps = get_top_princomps(demo_data, args.num_pc)", "get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse(): parser = argparse.ArgumentParser( prog=\"Testing HebbNet", "argparse import os import numpy as np from demo_utils import get_random_data from hebbnets.networks", "synthetic data\", default=100, type=int, required=False ) parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension of synthetic data\",", "hah_network.train(demo_data, num_epochs=1000) # Build/train network real_princomps = get_top_princomps(demo_data, args.num_pc) hebb_princomps = np.squeeze(hah_network.layers[0].input_weights) hebb_princomps", "np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the inner product of top", "keepdims=True) # Show the inner product of top two PCs with learned input", "samples for synthetic data\", default=25, type=int, required=False ) parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic", "of synthetic data\", default=3, type=int, required=False ) parser.add_argument( \"--num_pc\", help=\"Number of principle components", "import numpy as np from demo_utils import get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True)", "V[_idx, :].T def main(args): # Make data demo_data = get_random_data( args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension", "= MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) # Build/train network real_princomps", ") parser.add_argument( \"--num_samples\", help=\"Number 
of samples for synthetic data\", default=25, type=int, required=False )", "real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str( inner_prod_mat, suppress_small=True, precision=4 ) print(np.array_str(inner_prod_mat, precision=4)) if __name__ ==", "description=\"Testing HebbNet principal components by decomposing random data\" ) parser.add_argument( \"--num_samples\", help=\"Number of", "weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str( inner_prod_mat, suppress_small=True, precision=4 ) print(np.array_str(inner_prod_mat, precision=4))", "\"--num_samples\", help=\"Number of samples for synthetic data\", default=25, type=int, required=False ) parser.add_argument( \"--data_dimension\",", ") parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension of synthetic data\", default=3, type=int, required=False ) parser.add_argument(", "parser.add_argument( \"--num_pc\", help=\"Number of principle components to extract\", default=2, type=int, required=False ) return", ") hah_network.train(demo_data, num_epochs=1000) # Build/train network real_princomps = get_top_princomps(demo_data, args.num_pc) hebb_princomps = np.squeeze(hah_network.layers[0].input_weights)", "import os import numpy as np from demo_utils import get_random_data from hebbnets.networks import", "suppress_small=True, precision=4 ) print(np.array_str(inner_prod_mat, precision=4)) if __name__ == \"__main__\": args = _argparse() main(args)", "\"--data_latent_dimension\", help=\"Latent dimension of synthetic data\", default=3, type=int, required=False ) parser.add_argument( \"--num_pc\", help=\"Number", "# Build/train network hah_network = MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000)", "MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False, act_type='linear', ) hah_network.train(demo_data, num_epochs=1000) # Build/train 
network real_princomps =", "synthetic data\", default=3, type=int, required=False ) parser.add_argument( \"--num_pc\", help=\"Number of principle components to", "product of top two PCs with learned input weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string", "num_epochs=1000) # Build/train network real_princomps = get_top_princomps(demo_data, args.num_pc) hebb_princomps = np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /=", "hebb_princomps = np.squeeze(hah_network.layers[0].input_weights) hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True) # Show the inner product", "from CLI \"\"\" import argparse import os import numpy as np from demo_utils", "of synthetic data\", default=100, type=int, required=False ) parser.add_argument( \"--data_latent_dimension\", help=\"Latent dimension of synthetic", "CLI \"\"\" import argparse import os import numpy as np from demo_utils import", "default=3, type=int, required=False ) parser.add_argument( \"--num_pc\", help=\"Number of principle components to extract\", default=2,", "learned input weights inner_prod_mat = real_princomps.T.matmul(hebb_princomps) prod_as_string = np.array_str( inner_prod_mat, suppress_small=True, precision=4 )", "Test princomp extraction from CLI \"\"\" import argparse import os import numpy as", "as np from demo_utils import get_random_data from hebbnets.networks import MultilayerHahNetwork np.set_printoptions(suppress=True) def _argparse():", "of samples for synthetic data\", default=25, type=int, required=False ) parser.add_argument( \"--data_dimension\", help=\"Dimension of", "num_pcs): U, S, V = np.linalg.svd(np.array(data_array)) _idx = np.argsort(S)[-num_pcs:] return V[_idx, :].T def", "args.num_samples, args.data_dimension, latent_dim=args.data_latent_dimension ) # Build/train network hah_network = MultilayerHahNetwork( args.data_dimension, [args.num_pc], has_bias=False,", "inner product of top two PCs with learned input weights 
inner_prod_mat = real_princomps.T.matmul(hebb_princomps)", "default=25, type=int, required=False ) parser.add_argument( \"--data_dimension\", help=\"Dimension of synthetic data\", default=100, type=int, required=False" ]
[ "from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X it will provide", "debug, append your name here} ## Contributor Email : {if you debug, append", "here} ## Contributor Email : {if you debug, append your email here} ##", "3, 9, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1", "polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using cross-validation scores", "plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST MSE = {:.2e}\".format( degrees[i],", "\"\"\" ## Author: <NAME> ## Copyright: Copyright 2018-2019, Packt Publishing Limited ## Version:", "<EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you debug, append your name", "= [1, 3, 9, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples)", "degree of freedom \"\"\" ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(),", "plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\",", "Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you debug, append your", "true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\" Evaluating", "debug, append your email here} ## Status: active \"\"\" import matplotlib.pyplot as plt", "plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression", "np.pi * X) :param X: :return: \"\"\" return np.cos(1.5 * np.pi * X)", "{if you debug, append your email here} ## Status: active \"\"\" import matplotlib.pyplot", "import matplotlib.pyplot as plt import numpy 
as np from sklearn.linear_model import LinearRegression from", "-*- coding: utf-8 -*- \"\"\" ## Author: <NAME> ## Copyright: Copyright 2018-2019, Packt", "to Y by sing function np.cos(1.5 * np.pi * X) :param X: :return:", "xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features),", "sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def", "np.newaxis]), label=\"Model\") # plotting the True and predicted function plt.plot(X_test, true_fun(X_test), label=\"True function\")", "= np.linspace(0, 1, 100) # predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\")", "return np.cos(1.5 * np.pi * X) if __name__ == '__main__': n_samples = 30", "y) # Evaluate the models using cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis], y,", "Copyright 2018-2019, Packt Publishing Limited ## Version: 0.0.1 ## Maintainer: <NAME> ## Email:", "in range(len(degrees)): \"\"\" Evaluating and plotting for each degree of freedom \"\"\" ax", "## Maintainer: <NAME> ## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if", "sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing", "edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST", "Publishing Limited ## Version: 0.0.1 ## Maintainer: <NAME> ## Email: <EMAIL> ## Linkedin:", "= 30 degrees = [1, 3, 9, 15] X = np.sort(np.random.rand(n_samples)) y =", "y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") 
plt.title(\"Degree {}\\n", "if __name__ == '__main__': n_samples = 30 degrees = [1, 3, 9, 15]", "X_test = np.linspace(0, 1, 100) # predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]),", "here} ## Status: active \"\"\" import matplotlib.pyplot as plt import numpy as np", "* X) :param X: :return: \"\"\" return np.cos(1.5 * np.pi * X) if", "append your email here} ## Status: active \"\"\" import matplotlib.pyplot as plt import", "= PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:,", "the models using cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test", "predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the True and", "Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using cross-validation", ": {if you debug, append your name here} ## Contributor Email : {if", "include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y)", "# Evaluate the models using cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\",", "Version: 0.0.1 ## Maintainer: <NAME> ## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor", "yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\",", "test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), 
label=\"Model\") # plotting the True and predicted function", "function plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0,", "and plotting for each degree of freedom \"\"\" ax = plt.subplot(1, len(degrees), i", "15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5))", "## Status: active \"\"\" import matplotlib.pyplot as plt import numpy as np from", "from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from", "Maintainer: <NAME> ## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you", "sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X", "Status: active \"\"\" import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model", "cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100) # predicting", "pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the True and predicted function plt.plot(X_test, true_fun(X_test), label=\"True", "## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you debug, append", "9, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14,", "cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1,", "5)) for i in range(len(degrees)): \"\"\" Evaluating and plotting for each degree of", "matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression from sklearn.model_selection", "each degree of freedom \"\"\" ax = plt.subplot(1, len(degrees), i + 1) 
plt.setp(ax,", "of freedom \"\"\" ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=())", "Contributor Email : {if you debug, append your email here} ## Status: active", "Limited ## Version: 0.0.1 ## Maintainer: <NAME> ## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/", "on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the True and predicted", "Packt Publishing Limited ## Version: 0.0.1 ## Maintainer: <NAME> ## Email: <EMAIL> ##", "LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate the", "LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures", "plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST MSE = {:.2e}\".format(", "0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\" Evaluating and plotting for each", "true_fun(X_test), label=\"True function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2,", "100) # predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the", "1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline =", ": {if you debug, append your email here} ## Status: active \"\"\" import", "plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST MSE = {:.2e}\".format( degrees[i], -scores.mean()))", "Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X it will", "np from sklearn.linear_model import LinearRegression from 
sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline", "from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given", "plotting the True and predicted function plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X, y, edgecolor='b',", "as np from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline import", "label=\"True function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2))", "ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i],", "np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100) # predicting on test", "* np.pi * X) if __name__ == '__main__': n_samples = 30 degrees =", "pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate the models", "PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X it will provide its mapping to", "its mapping to Y by sing function np.cos(1.5 * np.pi * X) :param", "models using cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test =", "-*- \"\"\" ## Author: <NAME> ## Copyright: Copyright 2018-2019, Packt Publishing Limited ##", "## Author: <NAME> ## Copyright: Copyright 2018-2019, Packt Publishing Limited ## Version: 0.0.1", "Author: <NAME> ## Copyright: Copyright 2018-2019, Packt Publishing Limited ## Version: 0.0.1 ##", "## Version: 0.0.1 ## Maintainer: <NAME> ## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ##", "Email : {if you debug, append your email here} ## Status: active \"\"\"", "(\"linear_regression\", linear_regression)]) 
pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using cross-validation scores =", "True and predicted function plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\")", "np.random.seed(0) def true_fun(X): \"\"\" given X it will provide its mapping to Y", "true_fun(X): \"\"\" given X it will provide its mapping to Y by sing", "s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST MSE", "utf-8 -*- \"\"\" ## Author: <NAME> ## Copyright: Copyright 2018-2019, Packt Publishing Limited", "n_samples = 30 degrees = [1, 3, 9, 15] X = np.sort(np.random.rand(n_samples)) y", "len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression =", "predicted function plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\")", "i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression()", "y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)):", "np.linspace(0, 1, 100) # predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") #", "import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X it will provide its mapping", "Y by sing function np.cos(1.5 * np.pi * X) :param X: :return: \"\"\"", "import numpy as np from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score from", "function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\")", "np.cos(1.5 * np.pi * X) :param X: :return: 
def true_fun(X):
    """Ground-truth target function: y = cos(1.5 * pi * X).

    :param X: scalar or array of sample positions
    :return: the corresponding noiseless target values
    """
    return np.cos(np.pi * 1.5 * X)
cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0)", "np.pi * X) if __name__ == '__main__': n_samples = 30 degrees = [1,", "import LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import", "2018-2019, Packt Publishing Limited ## Version: 0.0.1 ## Maintainer: <NAME> ## Email: <EMAIL>", "linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) #", "plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree", "+ np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\" Evaluating and", "scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100)", "'__main__': n_samples = 30 degrees = [1, 3, 9, 15] X = np.sort(np.random.rand(n_samples))", "[1, 3, 9, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) *", "= np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i", "* 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\" Evaluating and plotting for", "## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you debug, append your name here}", "freedom \"\"\" ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features", "\"\"\" given X it will provide its mapping to Y by sing function", "PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) 
pipeline.fit(X[:, np.newaxis],", "== '__main__': n_samples = 30 degrees = [1, 3, 9, 15] X =", "label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST MSE =", "<NAME> ## Email: <EMAIL> ## Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you debug,", "# -*- coding: utf-8 -*- \"\"\" ## Author: <NAME> ## Copyright: Copyright 2018-2019,", "= cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100) #", "## Contributor : {if you debug, append your name here} ## Contributor Email", "given X it will provide its mapping to Y by sing function np.cos(1.5", "email here} ## Status: active \"\"\" import matplotlib.pyplot as plt import numpy as", "# predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the True", "* X) if __name__ == '__main__': n_samples = 30 degrees = [1, 3,", "## Copyright: Copyright 2018-2019, Packt Publishing Limited ## Version: 0.0.1 ## Maintainer: <NAME>", "np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in", "Linkedin: https://www.linkedin.com/in/linus1/ ## Contributor : {if you debug, append your name here} ##", "append your name here} ## Contributor Email : {if you debug, append your", "cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\"", "30 degrees = [1, 3, 9, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X)", "np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\" Evaluating and plotting", "plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\" Evaluating and plotting for each degree", "provide its mapping to Y by sing function np.cos(1.5 * np.pi * X)", "import Pipeline from sklearn.preprocessing 
import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X it", "sing function np.cos(1.5 * np.pi * X) :param X: :return: \"\"\" return np.cos(1.5", "import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X):", "def true_fun(X): \"\"\" given X it will provide its mapping to Y by", "function np.cos(1.5 * np.pi * X) :param X: :return: \"\"\" return np.cos(1.5 *", "polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)])", "by sing function np.cos(1.5 * np.pi * X) :param X: :return: \"\"\" return", "cv=10) X_test = np.linspace(0, 1, 100) # predicting on test data plt.plot(X_test, pipeline.predict(X_test[:,", "range(len(degrees)): \"\"\" Evaluating and plotting for each degree of freedom \"\"\" ax =", ":return: \"\"\" return np.cos(1.5 * np.pi * X) if __name__ == '__main__': n_samples", "Contributor : {if you debug, append your name here} ## Contributor Email :", "data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the True and predicted function plt.plot(X_test,", "sklearn.preprocessing import PolynomialFeatures np.random.seed(0) def true_fun(X): \"\"\" given X it will provide its", "np.cos(1.5 * np.pi * X) if __name__ == '__main__': n_samples = 30 degrees", "linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using cross-validation scores = cross_val_score(pipeline,", "plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting the True and predicted function plt.plot(X_test, true_fun(X_test),", "# plotting the True and predicted function plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X, y,", "\"\"\" ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) 
polynomial_features =", "= true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): \"\"\"", ":param X: :return: \"\"\" return np.cos(1.5 * np.pi * X) if __name__ ==", "it will provide its mapping to Y by sing function np.cos(1.5 * np.pi", "label=\"Model\") # plotting the True and predicted function plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X,", "numpy as np from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline", "mapping to Y by sing function np.cos(1.5 * np.pi * X) :param X:", "\"\"\" return np.cos(1.5 * np.pi * X) if __name__ == '__main__': n_samples =", "coding: utf-8 -*- \"\"\" ## Author: <NAME> ## Copyright: Copyright 2018-2019, Packt Publishing", "pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using cross-validation scores = cross_val_score(pipeline, X[:,", "i in range(len(degrees)): \"\"\" Evaluating and plotting for each degree of freedom \"\"\"", "for each degree of freedom \"\"\" ax = plt.subplot(1, len(degrees), i + 1)", "Evaluating and plotting for each degree of freedom \"\"\" ax = plt.subplot(1, len(degrees),", "X: :return: \"\"\" return np.cos(1.5 * np.pi * X) if __name__ == '__main__':", "y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100) # predicting on test data", "__name__ == '__main__': n_samples = 30 degrees = [1, 3, 9, 15] X", "degrees = [1, 3, 9, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) +", "plt.plot(X_test, true_fun(X_test), label=\"True function\") plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\") plt.xlabel(\"x\") plt.ylabel(\"y\") plt.xlim((0, 1))", "your name here} ## Contributor Email : {if you debug, append your email", "name here} ## Contributor Email : {if you debug, append your email here}", "X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for", "1)) 
plt.ylim((-2, 2)) plt.legend(loc=\"best\") plt.title(\"Degree {}\\n TEST MSE = {:.2e}\".format( degrees[i], -scores.mean())) plt.show()", "\"\"\" Evaluating and plotting for each degree of freedom \"\"\" ax = plt.subplot(1,", "\"\"\" import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression", "Evaluate the models using cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10)", "active \"\"\" import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import", "np.newaxis], y) # Evaluate the models using cross-validation scores = cross_val_score(pipeline, X[:, np.newaxis],", "plotting for each degree of freedom \"\"\" ax = plt.subplot(1, len(degrees), i +", "1, 100) # predicting on test data plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\") # plotting", "scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100) # predicting on test data plt.plot(X_test,", "X[:, np.newaxis], y, scoring=\"neg_mean_squared_error\", cv=10) X_test = np.linspace(0, 1, 100) # predicting on", "you debug, append your email here} ## Status: active \"\"\" import matplotlib.pyplot as", "your email here} ## Status: active \"\"\" import matplotlib.pyplot as plt import numpy", "+ 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline", "for i in range(len(degrees)): \"\"\" Evaluating and plotting for each degree of freedom", "Copyright: Copyright 2018-2019, Packt Publishing Limited ## Version: 0.0.1 ## Maintainer: <NAME> ##", "= LinearRegression() pipeline = Pipeline([(\"polynomial_features\", polynomial_features), (\"linear_regression\", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate" ]
[]
[ "10. Practice exceptions/divide.py def positive_divide(numerator, denominator): try: result = numerator / denominator except", "numerator / denominator except ZeroDivisionError: return 0 except Exception as x: raise x", "/ denominator except ZeroDivisionError: return 0 except Exception as x: raise x if", "result = numerator / denominator except ZeroDivisionError: return 0 except Exception as x:", "Exception as x: raise x if result < 0: raise ValueError return result", "return 0 except Exception as x: raise x if result < 0: raise", "denominator): try: result = numerator / denominator except ZeroDivisionError: return 0 except Exception", "= numerator / denominator except ZeroDivisionError: return 0 except Exception as x: raise", "positive_divide(numerator, denominator): try: result = numerator / denominator except ZeroDivisionError: return 0 except", "try: result = numerator / denominator except ZeroDivisionError: return 0 except Exception as", "except Exception as x: raise x if result < 0: raise ValueError return", "Practice exceptions/divide.py def positive_divide(numerator, denominator): try: result = numerator / denominator except ZeroDivisionError:", "exceptions/divide.py def positive_divide(numerator, denominator): try: result = numerator / denominator except ZeroDivisionError: return", "ZeroDivisionError: return 0 except Exception as x: raise x if result < 0:", "<filename>days/day101/Bite 10. Practice exceptions/divide.py def positive_divide(numerator, denominator): try: result = numerator / denominator", "denominator except ZeroDivisionError: return 0 except Exception as x: raise x if result", "except ZeroDivisionError: return 0 except Exception as x: raise x if result <", "0 except Exception as x: raise x if result < 0: raise ValueError", "def positive_divide(numerator, denominator): try: result = numerator / denominator except ZeroDivisionError: return 0" ]
[ "import argparse from utils import Audio def sample_wav_audio(path): audio = Audio() mel =", "= audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name, samples): audio", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name', action='store',", "np import argparse from utils import Audio def sample_wav_audio(path): audio = Audio() mel", "Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name,", "utils import Audio def sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path) samples =", "save_embeddings(name, samples): audio = Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel in samples:", "embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser", "avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser =", "= audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name, samples): audio = Audio() avg_embed", "np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True)", "parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True) args =", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True)", "sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', 
type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True) args", "for mel in samples: embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed", "in samples: embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed / 5", "audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__", "k=5) return samples def save_embeddings(name, samples): audio = Audio() avg_embed = np.zeros(256, dtype=np.float32)", "def sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5)", "+= embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__':", "audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name, samples): audio = Audio() avg_embed =", "from utils import Audio def sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path) samples", "= audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if", "= np.zeros(256, dtype=np.float32) for mel in samples: embed = audio.mel_to_embed(mel) avg_embed += embed", "= Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return samples def", "argparse from utils import Audio def sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path)", "audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name, samples): audio =", "import numpy as np import argparse from utils import Audio def sample_wav_audio(path): audio", "= argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True) args = parser.parse_args()", "argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) 
parser.add_argument('--name', action='store', type=str, required=True) args = parser.parse_args() samples", "numpy as np import argparse from utils import Audio def sample_wav_audio(path): audio =", "= avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser = argparse.ArgumentParser()", "mel in samples: embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed /", "avg_embed += embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ ==", "= Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel in samples: embed = audio.mel_to_embed(mel)", "samples): audio = Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel in samples: embed", "return samples def save_embeddings(name, samples): audio = Audio() avg_embed = np.zeros(256, dtype=np.float32) for", "/ 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store',", "def save_embeddings(name, samples): audio = Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel in", "samples: embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy',", "audio = Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel in samples: embed =", "parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True) args = parser.parse_args() samples =", "action='store', type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True) args = parser.parse_args() samples = sample_wav_audio(args.path)", "embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed = avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed)", "type=str, required=True) parser.add_argument('--name', action='store', type=str, required=True) args = parser.parse_args() samples = sample_wav_audio(args.path) 
save_embeddings(args.name,", "required=True) parser.add_argument('--name', action='store', type=str, required=True) args = parser.parse_args() samples = sample_wav_audio(args.path) save_embeddings(args.name, samples)", "as np import argparse from utils import Audio def sample_wav_audio(path): audio = Audio()", "mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name, samples):", "dtype=np.float32) for mel in samples: embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed =", "audio = Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128, k=5) return samples", "np.zeros(256, dtype=np.float32) for mel in samples: embed = audio.mel_to_embed(mel) avg_embed += embed avg_embed", "import Audio def sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel,", "avg_embed) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name',", "samples = audio.mel_sample(mel, width=128, k=5) return samples def save_embeddings(name, samples): audio = Audio()", "samples def save_embeddings(name, samples): audio = Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel", "avg_embed / 5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path',", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', action='store', type=str, required=True) parser.add_argument('--name', action='store', type=str,", "width=128, k=5) return samples def save_embeddings(name, samples): audio = Audio() avg_embed = np.zeros(256,", "Audio() avg_embed = np.zeros(256, dtype=np.float32) for mel in samples: embed = audio.mel_to_embed(mel) avg_embed", "5 np.save(f'./embeddings/{name}.npy', avg_embed) if __name__ == '__main__': parser = argparse.ArgumentParser() 
parser.add_argument('--path', action='store', type=str,", "avg_embed = np.zeros(256, dtype=np.float32) for mel in samples: embed = audio.mel_to_embed(mel) avg_embed +=", "Audio def sample_wav_audio(path): audio = Audio() mel = audio.audio_to_mel(path) samples = audio.mel_sample(mel, width=128," ]
[ "boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0,", "feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names,", "early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective,", "categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000,", "eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds)", "class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0,", "callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight,", "early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, 
init_score=init_score, eval_set=eval_set or [(X,", "subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y,", "or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds)", "return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score,", "early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None,", "random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth,", "reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type,", "eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X,", "self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, 
self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain,", "eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self,", "learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda,", "verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)],", "callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None,", "eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score,", "def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None,", "super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, 
eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric,", "self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name,", "eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y,", "X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto',", "objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent,", "lightgbm import LGBMClassifier, LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,", "init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor,", "feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,", "silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, 
eval_init_score=None, eval_metric=None,", "early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20,", "[(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class", "subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds", "eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set", "callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score,", "reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type,", "silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None,", "init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, 
eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return", "init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature,", "importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None,", "eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier,", "sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return", "= early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight,", "class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0,", "colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None,", 
"reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,", "n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None,", "n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None,", "return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric,", "class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type)", "DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001,", "silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,", "num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, 
min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0,", "eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt',", "importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin,", "__init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0,", "reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None,", "X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto',", "super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0,", "silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, 
self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,", "DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001,", "def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20,", "min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None):", "importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin,", "colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier,", "eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or", "n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, 
random_state=random_state,", "random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None,", "fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True,", "categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight,", "min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None,", "subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None,", "y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor):", "colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor,", "<reponame>shippeo/dss-plugin-model-lightgbm from lightgbm import LGBMClassifier, LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,", "n_jobs=-1, silent=True, importance_type='split', 
early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate,", "subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1,", "eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks,", "n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate,", "y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name,", "early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20,", "super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, 
min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0,", "early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X,", "learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0,", "fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto',", "eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31,", "y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature,", "categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight,", "eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y,", "self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, 
class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0,", "max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0,", "init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks,", "= early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight,", "eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set", "eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,", "min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def", "eval_names=None, eval_sample_weight=None, eval_class_weight=None, 
eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X,", "random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth,", "min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds", "min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X,", "max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0,", "class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split',", "y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None):", "self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, 
class_weight=class_weight, min_split_gain=min_split_gain,", "from lightgbm import LGBMClassifier, LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1,", "y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto',", "LGBMClassifier, LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None,", "self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose,", "LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None,", "importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None,", "eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score,", "reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None,", "early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, 
self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight,", "import LGBMClassifier, LGBMRegressor class DkuLGBMClassifier(LGBMClassifier): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000,", "min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type) def fit(self,", "eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def", "subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs,", "super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose,", "feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names,", "early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, 
max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective,", "eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or", "verbose=verbose, feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) class DkuLGBMRegressor(LGBMRegressor): def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1,", "early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight,", "reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds = early_stopping_rounds super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,", "n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None,", "objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True,", "subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', early_stopping_rounds=None, early_stopping=None): self.early_stopping_rounds =", "def fit(self, X, y, 
sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True,", "num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0,", "sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None):", "verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None): return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)]," ]
[ "time import numpy as np import resource from scipy.sparse import csr_matrix from sklearn.metrics", "(acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall *", "'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers, lexicons,", "\" % (len(dupDictItems), time() - c)) c = time() for i, (duplicateBug, listOfDuplicates)", "similarityList in similarityListByDuplicate: pos = biggestKValue + 1 cur = 0 listOfDuplicates =", "additional chunks.append(l[begin:end]) begin = end return chunks if __name__ == '__main__': parser =", "= [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId in", "[t[0] for t in similarityList])) if i % 200 == 0 and i", "If k is empty list so recall rate \" \"is not calculated\") parser.add_argument('--model',", "sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase from", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values", "list c = time() logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type ==", "duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id)", "bug of %d in %f\" % (i, len(duplicateByBugId), time() - c)) c =", "def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions = [] nBatches = math.ceil(float(len(validationPairs)) /", "'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model == 'classification': 
predictionInput = model[1](bug1s, bug2s)", "order the bugs by probability of being duplicate similarityList = sorted(bugScores, key=lambda x:", "1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id)", "and i != 0: logger.info(\"Processed %d Duplicate bug of %d in %f\" %", "prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc *", "experiments.sparse_vector import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def", "data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1", "\"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold',", "def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs)", "Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue + 1 cur =", "= len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics to a validation with proportion:", "funcArgs = [] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q =", "csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2)", "ptrs2 = [0] for bug1, bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 =", "empty list so recall rate \" \"is not calculated\") 
parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model", "import TfidfVectorizer def loadData(filePath): f = open(filePath, 'r') bugIds = set() duplicateByBugId =", "vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart =", "if args.model_type == 'tfidf': # Load Model global vectorByBug vectorByBug = {} tfIdfVectorizer", "arguments.model == 'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif", "bug of %d in %f\" % ( os.getpid(), i, len(duplicateBugs), time() - c))", "from data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from", "in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We can't import torch without allocating", "biggestKValue = recallKs[-1] total = len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics to", "= model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr, (np.float32,", "for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1 = [] indices1", "in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart = batchIdx * batchSize", "validation['k']) valitionBugIds = {} # Prepare data to prediction validationPairs = [] targets", "= F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() # Generate the", "a tuple (-1,None) when it is ending its work. 
count += 1 #", "bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = [] if args.model_type == 'tfidf':", "predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK", "data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from nltk", "= vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart", "% ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels:", "chunks(l, n): chunkSize = int(len(l) / n) remaining = len(l) % n chunks", "= vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1),", "recall and f1 accum = accuracy_score(targets, predictions, normalize=False) acc = accum / len(targets)", "Array, Queue from multiprocessing.sharedctypes import RawArray from queue import Empty from time import", "from util.torch_util import softmaxPrediction, getVariable from data.dataset import BugDataExtractor # Load Model and", "acc = accum / len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy:", "import math from ctypes import c_ulong from multiprocessing import Array, Queue from multiprocessing.sharedctypes", "ujson import sys import math from ctypes import c_ulong from multiprocessing import Array,", "%(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args = parser.parse_args() print(args)", "= q.get() if id == -1: # The process send a tuple (-1,None)", "i, (duplicateBug, 
listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity score of duplicate bug", "1 break cur += 1 if cur >= biggestKValue: break for idx, k", "= pickle.load(open(args.model, 'rb')) # Generate bag of words representation for each bug texts", "q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20 == 0 and", "begin = end return chunks if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k',", "batchStart = batchIdx * batchSize data1 = [] indices1 = [] ptrs1 =", "= CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() # Generate the embedding for each", "= time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs =", "# Generate bag of words representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for", "count == len(processes): break else: similarityScoresList[id] = scoreList except Empty as e: pass", "cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId not", "len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue +", "for l in f: l = l.strip() if len(l) == 0: break bug1Id,", "'rb')) # Generate bag of words representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId))", "scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from", "shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i])", "for pairIndex in validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] =", "import Array, Queue from 
multiprocessing.sharedctypes import RawArray from queue import Empty from time", "bugs. Start idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs):", "pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label))", "range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l) /", "= 1 remaining -= 1 else: additional = 0 end = begin +", "dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating the score for tf-idf.", "additional = 1 remaining -= 1 else: additional = 0 end = begin", "hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate:", "= csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1, matrix2) for", "parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\")", "end return chunks if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True,", "[] if args.model_type == 'tfidf': # Load Model global vectorByBug vectorByBug = {}", "except Empty as e: pass logger.info( \"Total time to calculate cosine similarity of", "import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text", "output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr, (np.float32, np.uint8)): 
predictions.append(pr) else:", "duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q = Queue() processes =", "model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model == 'retrieval': similarityFunction =", "help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8,", "accum / len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\"", "recommend list c = time() logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type", "= duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] =", "similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction =", "logger.info(\"TF-IDF: Process %s processed %d Duplicate bug of %d in %f\" % (", "+ additional chunks.append(l[begin:end]) begin = end return chunks if __name__ == '__main__': parser", "[0] data2 = [] indices2 = [] ptrs2 = [0] for otherBug in", "enumerate(dupDictItems): # Calculate the similarity score of duplicate bug with each bug if", "begin = 0 for i in range(n): if remaining != 0: additional =", "Empty from time import time import numpy as np import resource from scipy.sparse", "calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) /", "for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize", "n chunks = [] begin = 0 for i in 
range(n): if remaining", "__init__(self, dict): for k, v in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs):", "range(nBatches): batchStart = batchIdx * batchSize data1 = [] indices1 = [] ptrs1", "torch without allocating a GPU in Cedar cluster. from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\", "[] nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize", "return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l) / n) remaining", "range(n): if remaining != 0: additional = 1 remaining -= 1 else: additional", "help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes',", "%d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k']) if args.model_type == 'tfidf':", "return predictions def parallel(start, duplicateBugs, q): logger = logging.getLogger() c = time() logger.info(", "accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100, decimals=3)), list(np.around(f1", "= getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set the similarity and prediction", "bugIds)]) if i % 20 == 0 and i != 0: logger.info(\"TF-IDF: Process", "used in the recall ratio. 
If k is empty list so recall rate", "bugIdsOfValidation: continue if bugId in listOfDuplicates: pos = cur + 1 break cur", "ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]):", "predictions, normalize=False) acc = accum / len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets,", "datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args = parser.parse_args() print(args) global bugIds args.recall_ratio_k", "words representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors", "each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer)", "[] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart", "+ 1 break cur += 1 if cur >= biggestKValue: break for idx,", "c = time() # For each different proportion, we calculate the recall rate", "!= 0: additional = 1 remaining -= 1 else: additional = 0 end", "batchSize) firstBugPairs = [] secondBugPairs = [] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1])", "SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f = open(filePath, 'r') bugIds =", "encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set the similarity", "time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs = len(bugIds)", "Model global vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag", "pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) 
indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2]", "= parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k) for k in args.recall_ratio_k] bugIds,", "* len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue", "'tfidf': # Load Model global vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb'))", "= [int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation", "in zip(recallKs, hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \"", "\"Total time to calculate cosine similarity of %d duplicate bugs: %s \" %", "with bug id and its similarity score. bugScores = [(bugId, score) for bugId,", "%d\" % (os.getpid(), len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs): q.put([start + i,", "if k < pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for k,", "similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug) and", "i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs =", "%f\" % ( os.getpid(), i, len(duplicateBugs), time() - c)) c = time() q.put([-1,", "in range(n): if remaining != 0: additional = 1 remaining -= 1 else:", "batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1,", "= duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List =", "for bug1, 
bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart =", "= [] nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart = batchIdx *", "batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data)", "Break the loop when all processes were terminated if count == len(processes): break", "embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds,", "in similarityListByDuplicate: pos = biggestKValue + 1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug]", "output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs,", "n) remaining = len(l) % n chunks = [] begin = 0 for", "so recall rate \" \"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset',", "return chunks if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list", "validation with proportion: %d\" % validation['k']) valitionBugIds = {} # Prepare data to", "chunks = [] begin = 0 for i in range(n): if remaining !=", "else: # We can't import torch without allocating a GPU in Cedar cluster.", "1 else: additional = 0 end = begin + chunkSize + additional chunks.append(l[begin:end])", "arr = RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite", "set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List 
duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set())", "in args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId) for validation in validations: logger.info(\"Calculating", "tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag of words representation for each bug", "logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue + 1", "= [0] for bug1, bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1]", "reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if i % 200 == 0", "tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We can't", "logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \" % (k, rate, hit, total)) # Calculate Acc,", "* 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100,", "bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if", "help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args", "= [] ptrs2 = [0] for otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data)", "duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for", "l = l.strip() if len(l) == 0: break bug1Id, bug2Id, label = l.split(',')", "bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds", "= {} pairs = [] for l in f: l = l.strip() if", "model.eval() # Set the 
similarity and prediction functions if arguments.model == 'classification': similarityFunction", "similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model == 'retrieval':", "{}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100, decimals=3)), list(np.around(f1 * 100, decimals=3)))) logger.info(\"\")", "logging import os import pickle import random import ujson import sys import math", "enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We can't import torch without allocating a", "def loadData(filePath): f = open(filePath, 'r') bugIds = set() duplicateByBugId = {} pairs", "= time() logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf': #", "= len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores = [] for batchIdx in", "= len(l) % n chunks = [] begin = 0 for i in", "i in range(n): if remaining != 0: additional = 1 remaining -= 1", "import random import ujson import sys import math from ctypes import c_ulong from", "bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k']) if args.model_type ==", "ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))", "too slow. 
import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model using %d processes\"", "i != 0: logger.info(\"TF-IDF: Process %s processed %d Duplicate bug of %d in", "= True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs:", "F from util.torch_util import softmaxPrediction, getVariable from data.dataset import BugDataExtractor # Load Model", "= recallKs[-1] total = len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics to a", "100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100, decimals=3)),", "action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger()", "csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1, matrix2) for i", "bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for k, v in", "parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int,", "parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger =", "# Calculate Acc, precision, recall and f1 accum = accuracy_score(targets, predictions, normalize=False) acc", "== 'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model == 'classification': predictionInput = model[1](bug1s,", "ptrs1.append(len(indices1)) bugEmbedding2 = 
vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1)", "batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1] for batchIdx in", "= biggestKValue + 1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in", "%H:%M:%S', ) logger = logging.getLogger() args = parser.parse_args() print(args) global bugIds args.recall_ratio_k =", "0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId not in bugIdsOfValidation:", "batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2))", "= vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim =", "k is empty list so recall rate \" \"is not calculated\") parser.add_argument('--model', help=\"model\")", "%d in %f\" % (i, len(duplicateByBugId), time() - c)) c = time() #", "(%d/%d) \" % (k, rate, hit, total)) # Calculate Acc, precision, recall and", "= time() logger.info( \"Process %s started to compute the similarity for %d duplicate", "similarity for %d duplicate bugs. 
Start idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for", "vectorByBug, bugIds): batchSize = 1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize)", "= [] secondBugPairs = [] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for", "vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1]", "len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class", "import cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import", "= [] if args.model_type == 'tfidf': # Load Model global vectorByBug vectorByBug =", "bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug) and create tuples with bug id", "matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2, indices2,", "representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors =", "parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s", "[] ptrs2 = [0] for bug1, bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1", "= 1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug]", "len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs): 
q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)])", "score = cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs):", "(duplicateBug, duplicateBug) and create tuples with bug id and its similarity score. bugScores", "0 end = begin + chunkSize + additional chunks.append(l[begin:end]) begin = end return", "for each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor,", "key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if i %", "def parallel(start, duplicateBugs, q): logger = logging.getLogger() c = time() logger.info( \"Process %s", "0 and i != 0: logger.info(\"Processed %d Duplicate bug of %d in %f\"", "= 1024 predictions = [] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = []", "csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2)", "[0] for bug1, bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data)", "len(duplicateBugs) startToWrite = 0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong,", "if arguments.model == 'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction", "# The process send a tuple (-1,None) when it is ending its work.", "= end return chunks if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+',", "% (os.getpid(), len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db),", "chunkSize + additional chunks.append(l[begin:end]) begin = 
end return chunks if __name__ == '__main__':", "indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 =", "[0] data2 = [] indices2 = [] ptrs2 = [0] for bug1, bug2", "200 == 0 and i != 0: logger.info(\"Processed %d Duplicate bug of %d", "# Sort in descending order the bugs by probability of being duplicate similarityList", "c)) c = time() # For each different proportion, we calculate the recall", "np import resource from scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from", "matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize = 8192", "bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We can't import torch without", "bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId] =", "= l.strip() if len(l) == 0: break bug1Id, bug2Id, label = l.split(',') label", "predictions def parallel(start, duplicateBugs, q): logger = logging.getLogger() c = time() logger.info( \"Process", "= math.ceil(float(nPairs) / batchSize) similarityScores = [] for batchIdx in range(nBatches): batchStart =", "chunks.append(l[begin:end]) begin = end return chunks if __name__ == '__main__': parser = argparse.ArgumentParser()", "n): chunkSize = int(len(l) / n) remaining = len(l) % n chunks =", "bug with each bug if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores", "pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n):", "in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) 
indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices)", "with proportion: %d\" % validation['k']) valitionBugIds = {} # Prepare data to prediction", "= duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict):", "= softmaxPrediction elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda)", "(i, len(duplicateByBugId), time() - c)) c = time() # For each different proportion,", "softmaxPrediction, getVariable from data.dataset import BugDataExtractor # Load Model and DataHandlers arguments =", "from multiprocessing.sharedctypes import RawArray from queue import Empty from time import time import", "arr, q))) startToWrite += len(chunk) for p in processes: p.start() count = 0", "indices2 = [] ptrs2 = [0] for otherBug in bugIds[batchStart: batchStart + batchSize]:", "args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate", "arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda()", "cur >= biggestKValue: break for idx, k in enumerate(recallKs): if k < pos:", "(np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l) / n) remaining =", "of tf-idf model using %d processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs =", "(duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity score of duplicate bug with", "its similarity score. 
bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores)", "batchIdx * batchSize data1 = [] indices1 = [] ptrs1 = [0] data2", "logger.info(\"Calculating metrics to a validation with proportion: %d\" % validation['k']) valitionBugIds = {}", "% (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k']) if args.model_type == 'tfidf': predictions", "similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs) /", "args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q)))", "data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2", "len(l) % n chunks = [] begin = 0 for i in range(n):", "predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores", "= [] targets = [] bugIdsOfValidation = set() for pairIndex in validation['indexes']: bug1,", "similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs) nBatches =", "= 0 while True: try: id, scoreList = q.get() if id == -1:", "softmaxPrediction elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if", "duplicate pairs: %d\\tAmount of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs:", "math from ctypes import c_ulong from multiprocessing import Array, Queue from multiprocessing.sharedctypes import", "the precision, recall, accuracy recallKs = sorted([int(k) for k in args.recall_ratio_k]) biggestKValue =", "BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector 
import TokenizerStemmer from nltk import TreebankWordTokenizer,", "hit, total)) # Calculate Acc, precision, recall and f1 accum = accuracy_score(targets, predictions,", "calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20 == 0 and i != 0:", "of %d in %f\" % (i, len(duplicateByBugId), time() - c)) c = time()", "required=True, help=\"list of the values of k to be used in the recall", "setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions = [] nBatches", "vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2,", "= pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0,", "{} pairs = [] for l in f: l = l.strip() if len(l)", "getModel import torch import torch.nn.functional as F from util.torch_util import softmaxPrediction, getVariable from", "count = 0 while True: try: id, scoreList = q.get() if id ==", "len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc", "continue if bugId in listOfDuplicates: pos = cur + 1 break cur +=", "being duplicate similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t", "to a validation with proportion: %d\" % validation['k']) valitionBugIds = {} # Prepare", "open(filePath, 'r') bugIds = set() duplicateByBugId = {} pairs = [] for l", "similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug,", "batchSize) similarityScores = [] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize", "hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) 
\" % (k,", "= csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2),", "indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 =", "to calculate all duplicate pairs recommend list c = time() logger.info(\"Calculating similarity scores\")", "f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100, accum,", "= len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = []", "bugId != duplicateBug] # Sort in descending order the bugs by probability of", "for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity score of duplicate", "= [] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1 =", "Generate the embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById", "'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) #", "type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s", "resource from scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import", "cur + 1 break cur += 1 if cur >= biggestKValue: break for", "similarityScoresList = [0] * len(duplicateBugs) startToWrite = 0 for idx, chunk in enumerate(chunks(duplicateBugs,", "targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs: %d\" % ( 
np.count_nonzero(np.asarray(targets)),", "it is ending its work. count += 1 # Break the loop when", "when it is ending its work. count += 1 # Break the loop", "in range(nBatches): batchStart = batchIdx * batchSize data1 = [] indices1 = []", "accuracy_score(targets, predictions, normalize=False) acc = accum / len(targets) prec, recall, f1, _ =", "!= 0: logger.info(\"Processed %d Duplicate bug of %d in %f\" % (i, len(duplicateByBugId),", "i != 0: logger.info(\"Processed %d Duplicate bug of %d in %f\" % (i,", "CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as F from util.torch_util import softmaxPrediction,", "the embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById =", "= l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label ==", "validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate", "+ batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model", "a GPU in Cedar cluster. 
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction,", "bugId in listOfDuplicates: pos = cur + 1 break cur += 1 if", "< pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for k, hit in", "calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug) and create tuples", "embeddings, arguments) encoderContainer.eval() model.eval() # Set the similarity and prediction functions if arguments.model", "= csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2),", "similarityListByDuplicate: pos = biggestKValue + 1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for", "vectors = tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: #", "label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List", "this step because the sequential version was too slow. import multiprocessing logger.info(\"Calculating cosine", "= predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK =", "import RawArray from queue import Empty from time import time import numpy as", "[] for l in f: l = l.strip() if len(l) == 0: break", "score. 
bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId", "import torch.nn.functional as F from util.torch_util import softmaxPrediction, getVariable from data.dataset import BugDataExtractor", "in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk) for p in processes:", "with each bug if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores =", "Load Model global vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate", "== 'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model", "( os.getpid(), i, len(duplicateBugs), time() - c)) c = time() q.put([-1, None]) def", "loadData(filePath): f = open(filePath, 'r') bugIds = set() duplicateByBugId = {} pairs =", "q))) startToWrite += len(chunk) for p in processes: p.start() count = 0 while", "-1: # The process send a tuple (-1,None) when it is ending its", "to calculate cosine similarity of %d duplicate bugs: %s \" % (len(dupDictItems), time()", "len(chunk) for p in processes: p.start() count = 0 while True: try: id,", "in output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start,", "= batchIdx * batchSize data1 = [] indices1 = [] ptrs1 = [0]", "prediction functions if arguments.model == 'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction", "Cedar cluster. 
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import", "BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = [] if args.model_type == 'tfidf': # Load", "if bugId in listOfDuplicates: pos = cur + 1 break cur += 1", "data.dataset import BugDataExtractor # Load Model and DataHandlers arguments = Obj({ 'load': args.model,", "print(args) global bugIds args.recall_ratio_k = [int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs,", "p in processes: p.start() count = 0 while True: try: id, scoreList =", "pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for k, hit in zip(recallKs,", "send a tuple (-1,None) when it is ending its work. count += 1", "len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics to a validation with proportion: %d\"", "in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite,", "in descending order the bugs by probability of being duplicate similarityList = sorted(bugScores,", "= sorted([int(k) for k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId) for", "for tf-idf. We had to parallel this step because the sequential version was", "import os import pickle import random import ujson import sys import math from", "{} # Prepare data to prediction validationPairs = [] targets = [] bugIdsOfValidation", "k to be used in the recall ratio. 
If k is empty list", "duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id,", "duplicate similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in", "(len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k']) if args.model_type == 'tfidf': predictions =", "= accuracy_score(targets, predictions, normalize=False) acc = accum / len(targets) prec, recall, f1, _", "len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100, decimals=3)), list(np.around(f1 *", "args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction", "bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) #", "different proportion, we calculate the recall rate and the precision, recall, accuracy recallKs", "normalize=False) acc = accum / len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions)", "We can't import torch without allocating a GPU in Cedar cluster. 
from experiments.duplicate_bug_detection_deep_learning", "pair (duplicateBug, duplicateBug) and create tuples with bug id and its similarity score.", "experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional", "for duplicateBug, listOfDuplicates in dupDictItems] q = Queue() processes = [] similarityScoresList =", "predictions = [] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs =", "from ctypes import c_ulong from multiprocessing import Array, Queue from multiprocessing.sharedctypes import RawArray", "processes: p.start() count = 0 while True: try: id, scoreList = q.get() if", "args.model_type == 'tfidf': # Calculating the score for tf-idf. We had to parallel", "len(l) == 0: break bug1Id, bug2Id, label = l.split(',') label = int(label) pairs.append((bug1Id,", "Load Model and DataHandlers arguments = Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False,", "= math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1]", "300, 'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model =", "from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import", "x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if i % 200 ==", "= logging.getLogger() c = time() logger.info( \"Process %s started to compute the similarity", "all duplicate pairs recommend list c = time() logger.info(\"Calculating similarity scores\") dupDictItems =", "Process %s processed %d Duplicate bug of %d in %f\" % ( os.getpid(),", "batchStart + batchSize]: 
data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1", "args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds,", "bug id and its similarity score. bugScores = [(bugId, score) for bugId, score", "= [] bugIdsOfValidation = set() for pairIndex in validation['indexes']: bug1, bug2, label =", "sequential version was too slow. import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model", "similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if i % 200 == 0 and", "bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1)", "[] indices2 = [] ptrs2 = [0] for bug1, bug2 in pairs[batchStart: batchStart", "BugDataExtractor # Load Model and DataHandlers arguments = Obj({ 'load': args.model, 'cuda': args.cuda,", "== 'tfidf': predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall", "predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr,", "[0] for otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 =", "data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer from", "scores\") dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating the score for", "dataHandlers) bugEmbeddingsById = 
generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to calculate all duplicate pairs", "the sequential version was too slow. import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf", "= vectors[idx] else: # We can't import torch without allocating a GPU in", "total = len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics to a validation with", "TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f = open(filePath, 'r') bugIds", "0 and i != 0: logger.info(\"TF-IDF: Process %s processed %d Duplicate bug of", "calculate the recall rate and the precision, recall, accuracy recallKs = sorted([int(k) for", "args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics", "= {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag of words representation for", "biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = [] if", "begin + chunkSize + additional chunks.append(l[begin:end]) begin = end return chunks if __name__", "the score for tf-idf. 
We had to parallel this step because the sequential", "validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount", "import softmaxPrediction, getVariable from data.dataset import BugDataExtractor # Load Model and DataHandlers arguments", "= [] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q = Queue()", "global vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag of", "np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q): logger = logging.getLogger()", "logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating the", "cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l,", "from experiments.sparse_vector import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer", "as F from util.torch_util import softmaxPrediction, getVariable from data.dataset import BugDataExtractor # Load", "parallel(start, duplicateBugs, q): logger = logging.getLogger() c = time() logger.info( \"Process %s started", "= duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds,", "= [] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches):", "batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1 = [] indices1 =", "pairs recommend list c = time() logger.info(\"Calculating similarity scores\") 
dupDictItems = duplicateByBugId.items() if", "logger.info(\"Processed %d Duplicate bug of %d in %f\" % (i, len(duplicateByBugId), time() -", "probability of being duplicate similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0]", "for bugId in similarityList: if bugId not in bugIdsOfValidation: continue if bugId in", "= [0] for otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2", "try: id, scoreList = q.get() if id == -1: # The process send", "the similarity for %d duplicate bugs. Start idx: %d\" % (os.getpid(), len(duplicateBugs), start))", "%d\" % validation['k']) if args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs) else: predictions =", "import sys import math from ctypes import c_ulong from multiprocessing import Array, Queue", "nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1,", "For each different proportion, we calculate the recall rate and the precision, recall,", "bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs: %d\" %", "to parallel this step because the sequential version was too slow. 
import multiprocessing", "in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx]", "'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model ==", "[] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs = [] for", "multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model using %d processes\" % (args.nb_processes)) funcArgs", "model.cuda() encoderContainer.cuda() # Generate the embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor =", "batchStart = batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s", "q): logger = logging.getLogger() c = time() logger.info( \"Process %s started to compute", "nBatches = math.ceil(float(nPairs) / batchSize) similarityScores = [] for batchIdx in range(nBatches): batchStart", "# Calculate the similarity score of duplicate bug with each bug if args.model_type", "k, v in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024", "ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]):", "to prediction validationPairs = [] targets = [] bugIdsOfValidation = set() for pairIndex", "for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId]", "pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for k, v in dict.items(): setattr(self,", "the similarity and prediction functions if arguments.model == 'classification': similarityFunction = model[1] if", "if cur >= biggestKValue: break for idx, k in enumerate(recallKs): if k <", "/ n) remaining = len(l) % n chunks = [] begin = 0", "numpy as np import resource from 
scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score,", "bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1,", "duplicateBug] # Sort in descending order the bugs by probability of being duplicate", "bugIds = list(bugIds) similarityListByDuplicate = [] if args.model_type == 'tfidf': # Load Model", "idx, k in enumerate(recallKs): if k < pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall", "embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval()", "ctypes import c_ulong from multiprocessing import Array, Queue from multiprocessing.sharedctypes import RawArray from", "c)) c = time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate the", "ratio. If k is empty list so recall rate \" \"is not calculated\")", "if i % 20 == 0 and i != 0: logger.info(\"TF-IDF: Process %s", "+= len(chunk) for p in processes: p.start() count = 0 while True: try:", "logger.info(\"Calculating cosine similarity of tf-idf model using %d processes\" % (args.nb_processes)) funcArgs =", "# Calculate Recall Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for", "= list(bugIds) similarityListByDuplicate = [] if args.model_type == 'tfidf': # Load Model global", "in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20 ==", "= [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug]", "and i != 0: logger.info(\"TF-IDF: Process %s processed %d Duplicate bug of %d", "= RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite +=", "Queue() processes = [] 
similarityScoresList = [0] * len(duplicateBugs) startToWrite = 0 for", "random import ujson import sys import math from ctypes import c_ulong from multiprocessing", "math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs = [] for bug1, bug2 in", "% validation['k']) if args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById,", "= Queue() processes = [] similarityScoresList = [0] * len(duplicateBugs) startToWrite = 0", "batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model ==", "= duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating the score for tf-idf. We", "to be used in the recall ratio. If k is empty list so", "slow. import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model using %d processes\" %", "* len(duplicateBugs) startToWrite = 0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr =", "% 20 == 0 and i != 0: logger.info(\"TF-IDF: Process %s processed %d", "'tfidf': predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate", "time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity score of", "label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets)))", "break cur += 1 if cur >= biggestKValue: break for idx, k in", "len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k'])", "duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return", "processes were terminated if count == len(processes): break else: similarityScoresList[id] = 
scoreList except", "range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs) nBatches", "class Obj(object): def __init__(self, dict): for k, v in dict.items(): setattr(self, k, v)", "Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue + 1 cur", "predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK = [0] * len(recallKs)", "ending its work. count += 1 # Break the loop when all processes", "bugs: %s \" % (len(dupDictItems), time() - c)) c = time() for i,", "= begin + chunkSize + additional chunks.append(l[begin:end]) begin = end return chunks if", "Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to", "import torch import torch.nn.functional as F from util.torch_util import softmaxPrediction, getVariable from data.dataset", "True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount", "loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = []", "-= 1 else: additional = 0 end = begin + chunkSize + additional", "if i % 200 == 0 and i != 0: logger.info(\"Processed %d Duplicate", "is ending its work. 
count += 1 # Break the loop when all", "Obj(object): def __init__(self, dict): for k, v in dict.items(): setattr(self, k, v) def", "recall, accuracy recallKs = sorted([int(k) for k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total", "== 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) ==", "indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) -", "+ batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices)", "in enumerate(dupDictItems): # Calculate the similarity score of duplicate bug with each bug", "import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f = open(filePath, 'r')", "% (acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall", "1024 predictions = [] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs", "for p in processes: p.start() count = 0 while True: try: id, scoreList", "data2 = [] indices2 = [] ptrs2 = [0] for bug1, bug2 in", "i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize =", "Calculate Acc, precision, recall and f1 accum = accuracy_score(targets, predictions, normalize=False) acc =", "ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1,", "= 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId not in", "indices1.extend(bugEmbedding1.indices) 
ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1),", "end = begin + chunkSize + additional chunks.append(l[begin:end]) begin = end return chunks", "pickle.load(open(args.model, 'rb')) # Generate bag of words representation for each bug texts =", "duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self,", "duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds =", "0 while True: try: id, scoreList = q.get() if id == -1: #", "import torch without allocating a GPU in Cedar cluster. from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings,", "%.3f (%d/%d) \" % (k, rate, hit, total)) # Calculate Acc, precision, recall", "sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f = open(filePath, 'r') bugIds = set() duplicateByBugId", "%(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args = parser.parse_args() print(args) global", "elif arguments.model == 'classification': predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr", "parallel this step because the sequential version was too slow. import multiprocessing logger.info(\"Calculating", "RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk)", "in enumerate(recallKs): if k < pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\")", "tf-idf. 
We had to parallel this step because the sequential version was too", "# We can't import torch without allocating a GPU in Cedar cluster. from", "c_ulong from multiprocessing import Array, Queue from multiprocessing.sharedctypes import RawArray from queue import", "cur += 1 if cur >= biggestKValue: break for idx, k in enumerate(recallKs):", "bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk) for p in", "batchSize = 1024 predictions = [] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs =", "precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall:", "biggestKValue: break for idx, k in enumerate(recallKs): if k < pos: continue hitsPerRateK[idx]", "argparse import logging import os import pickle import random import ujson import sys", "accum = accuracy_score(targets, predictions, normalize=False) acc = accum / len(targets) prec, recall, f1,", "parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d", "in dupDictItems] q = Queue() processes = [] similarityScoresList = [0] * len(duplicateBugs)", "listOfDuplicates: pos = cur + 1 break cur += 1 if cur >=", "= [] similarityScoresList = [0] * len(duplicateBugs) startToWrite = 0 for idx, chunk", "\\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as F from util.torch_util import", "type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\")", "startToWrite += len(chunk) for p in 
processes: p.start() count = 0 while True:", "logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start", "= batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s =", "predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return", "of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k']) if args.model_type", "def predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs) / batchSize)", "valitionBugIds = {} # Prepare data to prediction validationPairs = [] targets =", "bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]),", "args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings,", "k < pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for k, hit", "nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) >", "from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase", "model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set the similarity and", "(k, rate, hit, total)) # Calculate Acc, precision, recall and f1 accum =", "validationPairs = [] targets = [] bugIdsOfValidation = set() for pairIndex in validation['indexes']:", "= 
csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for", "generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as F", "/ batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1] for batchIdx", "duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List", "duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for k, v in dict.items():", "'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments)", "vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1)", "similarity of tf-idf model using %d processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs", "logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec *", "* batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart", "f1 accum = accuracy_score(targets, predictions, normalize=False) acc = accum / len(targets) prec, recall,", "shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))", "for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation = validations[-1]", "getVariable(torch.stack(secondBugPairs[batchStart: batchStart + 
batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput = [bug1s, bug2s]", "/ batchSize) firstBugPairs = [] secondBugPairs = [] for bug1, bug2 in validationPairs:", "of %d duplicate bugs: %s \" % (len(dupDictItems), time() - c)) c =", "+ 1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList: if", "%s started to compute the similarity for %d duplicate bugs. Start idx: %d\"", "isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q): logger", "idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We can't import torch", "of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation)))", "1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores)", "validationPairs) # Calculate Recall Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall Rate\")", "'r') bugIds = set() duplicateByBugId = {} pairs = [] for l in", "= duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId not in bugIdsOfValidation: continue if", "== 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List", "= tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We", "shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i])", "%d\\tAmount of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" %", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values of", "prediction 
validationPairs = [] targets = [] bugIdsOfValidation = set() for pairIndex in", "data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1,", "%d processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates", "process send a tuple (-1,None) when it is ending its work. count +=", "+ chunkSize + additional chunks.append(l[begin:end]) begin = end return chunks if __name__ ==", "be used in the recall ratio. If k is empty list so recall", "lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set the similarity and prediction functions if", "[] similarityScoresList = [0] * len(duplicateBugs) startToWrite = 0 for idx, chunk in", "otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data)", "for %d duplicate bugs. 
Start idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for i,", "arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() #", "total)) # Calculate Acc, precision, recall and f1 accum = accuracy_score(targets, predictions, normalize=False)", "k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset", "data to prediction validationPairs = [] targets = [] bugIdsOfValidation = set() for", "pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting", "= getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda)", "bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput =", "of duplicate bug with each bug if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0)", "predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK = [0]", "True: try: id, scoreList = q.get() if id == -1: # The process", "sys import math from ctypes import c_ulong from multiprocessing import Array, Queue from", "pos = biggestKValue + 1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId", "values of k to be used in the recall ratio. 
If k is", "biggestKValue + 1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList:", "# Break the loop when all processes were terminated if count == len(processes):", "break for idx, k in enumerate(recallKs): if k < pos: continue hitsPerRateK[idx] +=", "when all processes were terminated if count == len(processes): break else: similarityScoresList[id] =", "if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id]", "dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to calculate", "similarity score. bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if", "CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() # Generate the embedding for each bug", "getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if", "argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values of k to be used", "len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List)", "Rate Results:\") for k, hit in zip(recallKs, hitsPerRateK): rate = float(hit) / total", "predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec", "scoreList except Empty as e: pass logger.info( \"Total time to calculate cosine similarity", "validation['k']) if args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs)", "of k to be 
used in the recall ratio. If k is empty", "cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize =", "in validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2]", "help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\",", "q = Queue() processes = [] similarityScoresList = [0] * len(duplicateBugs) startToWrite =", "bug1Id, bug2Id, label = l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id)", "for i, db in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i", "len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim", "8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores = [] for", "predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions = [] nBatches = math.ceil(float(len(validationPairs)) / batchSize)", "level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args = parser.parse_args() print(args) global bugIds", "cosine similarity of %d duplicate bugs: %s \" % (len(dupDictItems), time() - c))", "batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart +", "= calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug) and create", "bugEmbedding1 = vectorByBug[bug1] 
data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim", "validationPairs): batchSize = 1024 predictions = [] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs", "version was too slow. import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model using", "i, db in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i %", "20 == 0 and i != 0: logger.info(\"TF-IDF: Process %s processed %d Duplicate", "duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId,", "= [0] data2 = [] indices2 = [] ptrs2 = [0] for otherBug", "start)) for i, db in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if", "== 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object):", "in processes: p.start() count = 0 while True: try: id, scoreList = q.get()", "os import pickle import random import ujson import sys import math from ctypes", "= int(len(l) / n) remaining = len(l) % n chunks = [] begin", "- 1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score", "texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId", "args.cuda: model.cuda() encoderContainer.cuda() # Generate the embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor", "parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) 
parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda',", "1, nbDim)) score = cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores", "for k, hit in zip(recallKs, hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t k=%d:", "return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for k, v", "= validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = [] if args.model_type", "while True: try: id, scoreList = q.get() if id == -1: # The", "for i in range(n): if remaining != 0: additional = 1 remaining -=", "l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1:", "= generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to calculate all duplicate pairs recommend list", "predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() # Generate the embedding for", "nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores =", "parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId,", "using %d processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug for duplicateBug,", "processed %d Duplicate bug of %d in %f\" % ( os.getpid(), i, len(duplicateBugs),", "validation in validations: logger.info(\"Calculating metrics to a validation with proportion: %d\" % validation['k'])", "# Start to calculate all duplicate pairs recommend list c = time() logger.info(\"Calculating", "required=True) 
parser.add_argument('--retrieval_threshold', type=float, default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\")", "= 0 for i in range(n): if remaining != 0: additional = 1", "\" \"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True)", "indices2 = [] ptrs2 = [0] for bug1, bug2 in pairs[batchStart: batchStart +", "metrics to a validation with proportion: %d\" % validation['k']) valitionBugIds = {} #", "ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2,", "False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer,", "f: l = l.strip() if len(l) == 0: break bug1Id, bug2Id, label =", "= vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 =", "duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List", "similarity of %d duplicate bugs: %s \" % (len(dupDictItems), time() - c)) c", "functions if arguments.model == 'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda() predictionFunction =", "precision, recall and f1 accum = accuracy_score(targets, predictions, normalize=False) acc = accum /", "for otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug]", "# Prepare data to prediction validationPairs = [] targets = [] bugIdsOfValidation =", "rate = 
float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \" % (k, rate,", "[int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation =", "logger.info(\"Predicting pair labels: %d\" % validation['k']) if args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs)", "= sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if", "dupDictItems] q = Queue() processes = [] similarityScoresList = [0] * len(duplicateBugs) startToWrite", "bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2))", "each different proportion, we calculate the recall rate and the precision, recall, accuracy", "else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK = [0] *", "pr in output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def", "if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations']", "predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q): logger = logging.getLogger() c", "and DataHandlers arguments = Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300,", "util.torch_util import softmaxPrediction, getVariable from data.dataset import BugDataExtractor # Load Model and DataHandlers", "os.getpid(), i, len(duplicateBugs), time() - c)) c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug,", "if args.model_type == 
'tfidf': predictions = predictTFIDF(validationPairs) else: predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs) #", "q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs = len(bugIds) nBatches", "as e: pass logger.info( \"Total time to calculate cosine similarity of %d duplicate", "label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] =", "dataExtractor, encoderContainer) # Start to calculate all duplicate pairs recommend list c =", "= int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List =", "nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2", "= accum / len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f", "batchStart + batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput = [bug1s, bug2s] elif", "predictionInput = [bug1s, bug2s] elif arguments.model == 'classification': predictionInput = model[1](bug1s, bug2s) output", "in f: l = l.strip() if len(l) == 0: break bug1Id, bug2Id, label", "compute the similarity for %d duplicate bugs. Start idx: %d\" % (os.getpid(), len(duplicateBugs),", "of words representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds]", "np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" %", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the", "create tuples with bug id and its similarity score. 
bugScores = [(bugId, score)", "Empty as e: pass logger.info( \"Total time to calculate cosine similarity of %d", "from nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f =", "(len(dupDictItems), time() - c)) c = time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems):", "= [] begin = 0 for i in range(n): if remaining != 0:", "\\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as F from", "recallKs[-1] total = len(duplicateByBugId) for validation in validations: logger.info(\"Calculating metrics to a validation", "duplicate bugs: %s \" % (len(dupDictItems), time() - c)) c = time() for", "cosine similarity of tf-idf model using %d processes\" % (args.nb_processes)) funcArgs = []", "loop when all processes were terminated if count == len(processes): break else: similarityScoresList[id]", "args = parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k) for k in args.recall_ratio_k]", "in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset =", "return similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs)", "0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0:", "pairs = [] for l in f: l = l.strip() if len(l) ==", "csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i", "similarityScores = [] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1", "bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug] # Sort in descending", "'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 
'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments", "a validation with proportion: %d\" % validation['k']) valitionBugIds = {} # Prepare data", "in %f\" % ( os.getpid(), i, len(duplicateBugs), time() - c)) c = time()", "remaining = len(l) % n chunks = [] begin = 0 for i", "= math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs = [] for bug1, bug2", "= True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs:", "1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score =", "# Calculating the score for tf-idf. We had to parallel this step because", "nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs = [] for bug1,", "precision, recall, accuracy recallKs = sorted([int(k) for k in args.recall_ratio_k]) biggestKValue = recallKs[-1]", "/ len(targets) prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" %", "= cur + 1 break cur += 1 if cur >= biggestKValue: break", "args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments =", "[] bugIdsOfValidation = set() for pairIndex in validation['indexes']: bug1, bug2, label = pairs[pairIndex]", "validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart = batchIdx * batchSize bug1s", "similarityScores = [] nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart = batchIdx", "were terminated if count == len(processes): break else: similarityScoresList[id] = scoreList except Empty", "torch import torch.nn.functional as F from util.torch_util import softmaxPrediction, getVariable from data.dataset import", "similarityScores = 
similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove", "sorted([int(k) for k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId) for validation", "= scoreList except Empty as e: pass logger.info( \"Total time to calculate cosine", "similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() # Generate", "if len(l) == 0: break bug1Id, bug2Id, label = l.split(',') label = int(label)", "and its similarity score. bugScores = [(bugId, score) for bugId, score in zip(bugIds,", "Acc, precision, recall and f1 accum = accuracy_score(targets, predictions, normalize=False) acc = accum", "= precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100, accum, len(targets))) logger.info(\"Precision:", "= argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values of k to be", "bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] = True", "allocating a GPU in Cedar cluster. 
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\", "float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \" % (k, rate, hit, total))", "vectorByBug, bugIds)]) if i % 20 == 0 and i != 0: logger.info(\"TF-IDF:", "bugIdsOfValidation = set() for pairIndex in validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1,", "[0] * len(duplicateBugs) startToWrite = 0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr", "else: similarityScoresList[id] = scoreList except Empty as e: pass logger.info( \"Total time to", "nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1,", "elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda:", "terminated if count == len(processes): break else: similarityScoresList[id] = scoreList except Empty as", "for validation in validations: logger.info(\"Calculating metrics to a validation with proportion: %d\" %", "= vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1,", "F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() # Generate the embedding", "{}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100, decimals=3)), list(np.around(f1 * 100, decimals=3))))", "from scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity", "work. 
count += 1 # Break the loop when all processes were terminated", "arguments.model == 'classification': predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in", "rate, hit, total)) # Calculate Acc, precision, recall and f1 accum = accuracy_score(targets,", "score of duplicate bug with each bug if args.model_type == 'tfidf': similarityScores =", "\" % (k, rate, hit, total)) # Calculate Acc, precision, recall and f1", "GPU in Cedar cluster. from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb,", "import resource from scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise", "}) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings,", "[] indices2 = [] ptrs2 = [0] for otherBug in bugIds[batchStart: batchStart +", "if remaining != 0: additional = 1 remaining -= 1 else: additional =", "duplicateBug) and create tuples with bug id and its similarity score. bugScores =", "default=None, help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s',", "args.cuda) if arguments.model == 'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model == 'classification':", "step because the sequential version was too slow. 
import multiprocessing logger.info(\"Calculating cosine similarity", "indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) -", "id, scoreList = q.get() if id == -1: # The process send a", "args.recall_ratio_k = [int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input)", "= [] indices1 = [] ptrs1 = [0] data2 = [] indices2 =", "Recall Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList", "calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None,", "label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2)", "logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\" % validation['k']) if", "if args.cuda: model.cuda() encoderContainer.cuda() # Generate the embedding for each bug logger.info(\"Generating Embeddings\")", "batchSize = 1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 =", "predictionFunction = softmaxPrediction elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold,", "ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1,", "logger = logging.getLogger() args = parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k) for", "duplicateBugs, q): logger = logging.getLogger() c = time() logger.info( \"Process %s started to", "import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, 
getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as", "for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId in", "similarityScores) if bugId != duplicateBug] # Sort in descending order the bugs by", "validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = [] if args.model_type ==", "= cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize", "accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing import", "global bugIds args.recall_ratio_k = [int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations", "the loop when all processes were terminated if count == len(processes): break else:", "cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args =", "if bugId not in bugIdsOfValidation: continue if bugId in listOfDuplicates: pos = cur", "in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs = len(pairs)", "proportion, we calculate the recall rate and the precision, recall, accuracy recallKs =", "Start to calculate all duplicate pairs recommend list c = time() logger.info(\"Calculating similarity", "1 # Break the loop when all processes were terminated if count ==", "for pr in output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions", "nPairs = len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores = [] for batchIdx", "'__main__': parser = argparse.ArgumentParser() 
parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values of k", "in range(nBatches): batchStart = batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]),", "[] indices1 = [] ptrs1 = [0] data2 = [] indices2 = []", "(np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q): logger =", "c = time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity", "vectors[idx] else: # We can't import torch without allocating a GPU in Cedar", "of duplicate pairs: %d\\tAmount of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of", "x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if i % 200", "1 if cur >= biggestKValue: break for idx, k in enumerate(recallKs): if k", "all processes were terminated if count == len(processes): break else: similarityScoresList[id] = scoreList", "= float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \" % (k, rate, hit,", "for idx, k in enumerate(recallKs): if k < pos: continue hitsPerRateK[idx] += 1", "targets = [] bugIdsOfValidation = set() for pairIndex in validation['indexes']: bug1, bug2, label", "remaining -= 1 else: additional = 0 end = begin + chunkSize +", "/ batchSize) similarityScores = [] for batchIdx in range(nBatches): batchStart = batchIdx *", "bugId not in bugIdsOfValidation: continue if bugId in listOfDuplicates: pos = cur +", "We had to parallel this step because the sequential version was too slow.", "0: break bug1Id, bug2Id, label = l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label))", "= [] ptrs1 = [0] data2 = [] indices2 = [] ptrs2 =", "listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId not in bugIdsOfValidation: continue", "+= 1 logger.info(\"Recall Rate Results:\") for 
k, hit in zip(recallKs, hitsPerRateK): rate =", "- 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return", "= logging.getLogger() args = parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k) for k", "len(duplicateBugs), time() - c)) c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds):", "tuple (-1,None) when it is ending its work. count += 1 # Break", "started to compute the similarity for %d duplicate bugs. Start idx: %d\" %", "from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer", "int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id,", "# Generate the embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers)", "if bugId != duplicateBug] # Sort in descending order the bugs by probability", "import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database", "processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in", "'classification': predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if", "if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q):", "%d Duplicate bug of %d in %f\" % ( os.getpid(), i, len(duplicateBugs), time()", "duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId not in bugIdsOfValidation: continue if bugId", "break else: similarityScoresList[id] = scoreList except 
Empty as e: pass logger.info( \"Total time", "%f\" % (i, len(duplicateByBugId), time() - c)) c = time() # For each", "chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel,", "from time import time import numpy as np import resource from scipy.sparse import", "% (k, rate, hit, total)) # Calculate Acc, precision, recall and f1 accum", "= loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate =", "encoderContainer) # Start to calculate all duplicate pairs recommend list c = time()", "for batchIdx in range(nBatches): batchStart = batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart", "== 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda)", "Set the similarity and prediction functions if arguments.model == 'classification': similarityFunction = model[1]", "its work. count += 1 # Break the loop when all processes were", "dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments)", "if args.model_type == 'tfidf': # Calculating the score for tf-idf. 
We had to", "== 'classification': predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output:", "= Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True })", "q.get() if id == -1: # The process send a tuple (-1,None) when", "bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart = batchIdx *", "nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f = open(filePath,", "indices1 = [] ptrs1 = [0] data2 = [] indices2 = [] ptrs2", "pos = cur + 1 break cur += 1 if cur >= biggestKValue:", "help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG,", "1 logger.info(\"Recall Rate Results:\") for k, hit in zip(recallKs, hitsPerRateK): rate = float(hit)", "predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q): logger = logging.getLogger() c = time()", "and f1 accum = accuracy_score(targets, predictions, normalize=False) acc = accum / len(targets) prec,", "data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) -", "= [] nBatches = math.ceil(float(len(validationPairs)) / batchSize) firstBugPairs = [] secondBugPairs = []", "import BugDataExtractor # Load Model and DataHandlers arguments = Obj({ 'load': args.model, 'cuda':", "indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 
1,", "[] targets = [] bugIdsOfValidation = set() for pairIndex in validation['indexes']: bug1, bug2,", "== 0: break bug1Id, bug2Id, label = l.split(',') label = int(label) pairs.append((bug1Id, bug2Id,", "import pickle import random import ujson import sys import math from ctypes import", "bug if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction,", "args=(startToWrite, arr, q))) startToWrite += len(chunk) for p in processes: p.start() count =", "% validation['k']) valitionBugIds = {} # Prepare data to prediction validationPairs = []", "zip(bugIds, similarityScores) if bugId != duplicateBug] # Sort in descending order the bugs", "encoderContainer.cuda() # Generate the embedding for each bug logger.info(\"Generating Embeddings\") dataExtractor = BugDataExtractor(bugReportDataset,", "import argparse import logging import os import pickle import random import ujson import", "to compute the similarity for %d duplicate bugs. 
Start idx: %d\" % (os.getpid(),", "%s processed %d Duplicate bug of %d in %f\" % ( os.getpid(), i,", "precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription", "if count == len(processes): break else: similarityScoresList[id] = scoreList except Empty as e:", "(os.getpid(), len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug,", "Start idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs): q.put([start", "c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs", "% n chunks = [] begin = 0 for i in range(n): if", "= [] ptrs2 = [0] for bug1, bug2 in pairs[batchStart: batchStart + batchSize]:", "similarity score of duplicate bug with each bug if args.model_type == 'tfidf': similarityScores", "The process send a tuple (-1,None) when it is ending its work. 
count", "processes = [] similarityScoresList = [0] * len(duplicateBugs) startToWrite = 0 for idx,", "getVariable from data.dataset import BugDataExtractor # Load Model and DataHandlers arguments = Obj({", "each bug if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug,", "[concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds):", "torch.nn.functional as F from util.torch_util import softmaxPrediction, getVariable from data.dataset import BugDataExtractor #", "k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId) for validation in validations:", "proportion: %d\" % validation['k']) valitionBugIds = {} # Prepare data to prediction validationPairs", "predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall", "model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr, (np.float32, np.uint8)):", "pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2, indices2,", "k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions = [] nBatches =", "nbDim)) score = cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def", "[0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos =", "[int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk) for", "= BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, 
encoderContainer) # Start to calculate all", "= [] indices2 = [] ptrs2 = [0] for otherBug in bugIds[batchStart: batchStart", "chunks if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of", "- c)) c = time() # For each different proportion, we calculate the", "( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair labels: %d\"", "in Cedar cluster. from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel", "list so recall rate \" \"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\")", "bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to calculate all duplicate pairs recommend", "pairs: %d\\tAmount of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\"", "%s \" % (len(dupDictItems), time() - c)) c = time() for i, (duplicateBug,", "accuracy recallKs = sorted([int(k) for k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total =", "(args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q", "= BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds) similarityListByDuplicate = [] if args.model_type == 'tfidf': #", "!= duplicateBug] # Sort in descending order the bugs by probability of being", "1 remaining -= 1 else: additional = 0 end = begin + chunkSize", "sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector", "sorted(bugScores, key=lambda x: x[1], reverse=True) 
similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList])) if i", "TfidfVectorizer def loadData(filePath): f = open(filePath, 'r') bugIds = set() duplicateByBugId = {}", "from queue import Empty from time import time import numpy as np import", "bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1 = []", "bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr)", "# Load Model and DataHandlers arguments = Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional':", "calculate all duplicate pairs recommend list c = time() logger.info(\"Calculating similarity scores\") dupDictItems", "i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20 == 0 and i !=", "# Load Model global vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) #", "rate \" \"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input',", "% (len(dupDictItems), time() - c)) c = time() for i, (duplicateBug, listOfDuplicates) in", "firstBugPairs = [] secondBugPairs = [] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2])", "duplicate bugs. 
Start idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for i, db in", "logger.info(\"Recall Rate Results:\") for k, hit in zip(recallKs, hitsPerRateK): rate = float(hit) /", "(%d/%d)\" % (acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)),", "True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs: %d\"", "= [] for l in f: l = l.strip() if len(l) == 0:", "secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart = batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart:", "duplicateBug, listOfDuplicates in dupDictItems] q = Queue() processes = [] similarityScoresList = [0]", "duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating the score for tf-idf. We had", "%d duplicate bugs: %s \" % (len(dupDictItems), time() - c)) c = time()", "if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById,", "Calculate the similarity score of duplicate bug with each bug if args.model_type ==", "= [] indices2 = [] ptrs2 = [0] for bug1, bug2 in pairs[batchStart:", "rate and the precision, recall, accuracy recallKs = sorted([int(k) for k in args.recall_ratio_k])", "of the values of k to be used in the recall ratio. 
If", "for duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue + 1 cur = 0", "1 cur = 0 listOfDuplicates = duplicateByBugId[duplicateBug] for bugId in similarityList: if bugId", "logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger = logging.getLogger() args = parser.parse_args()", "bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for idx,", "+ batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if arguments.model ==", "secondBugPairs = [] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in", "args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput", "= [0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in similarityListByDuplicate: pos", "calculate cosine similarity of %d duplicate bugs: %s \" % (len(dupDictItems), time() -", "dict): for k, v in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize", "+= 1 # Break the loop when all processes were terminated if count", "list(bugIds) similarityListByDuplicate = [] if args.model_type == 'tfidf': # Load Model global vectorByBug", "time() - c)) c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize", "if id == -1: # The process send a tuple (-1,None) when it", "import logging import os import pickle import random import ujson import sys import", "else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug)", "logger 
= logging.getLogger() c = time() logger.info( \"Process %s started to compute the", "similarityList: if bugId not in bugIdsOfValidation: continue if bugId in listOfDuplicates: pos =", "logger.info( \"Process %s started to compute the similarity for %d duplicate bugs. Start", "generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to calculate all duplicate pairs recommend list c", "recall, f1, _ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100,", "Queue from multiprocessing.sharedctypes import RawArray from queue import Empty from time import time", "time() - c)) c = time() # For each different proportion, we calculate", "encoderContainer.eval() model.eval() # Set the similarity and prediction functions if arguments.model == 'classification':", "arguments) encoderContainer.eval() model.eval() # Set the similarity and prediction functions if arguments.model ==", "== 'tfidf': # Load Model global vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model,", "'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers,", "args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l) / n) remaining = len(l) %", "- 1, nbDim)) score = cosine_similarity(matrix1, matrix2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return", "# For each different proportion, we calculate the recall rate and the precision,", "k=%d: %.3f (%d/%d) \" % (k, rate, hit, total)) # Calculate Acc, precision,", "%d Duplicate bug of %d in %f\" % (i, len(duplicateByBugId), time() - c))", "BugDataExtractor(bugReportDataset, dataHandlers) bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer) # Start to calculate all duplicate", "parser.add_argument('--input', required=True) parser.add_argument('--retrieval_threshold', type=float, default=None, 
help=\"\") parser.add_argument('--nb_processes', type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable", "bugIds args.recall_ratio_k = [int(k) for k in args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations =", "in zip(bugIds, similarityScores) if bugId != duplicateBug] # Sort in descending order the", "= time() # For each different proportion, we calculate the recall rate and", "= [0] * len(duplicateBugs) startToWrite = 0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)):", "batchIdx in range(nBatches): batchStart = batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart +", "idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId in chunk])", "pickle import random import ujson import sys import math from ctypes import c_ulong", "Calculating the score for tf-idf. We had to parallel this step because the", "bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart = batchIdx", "logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs: %d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount", "startToWrite = 0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId)", "# Remove pair (duplicateBug, duplicateBug) and create tuples with bug id and its", "time() # For each different proportion, we calculate the recall rate and the", "bugIds = set() duplicateByBugId = {} pairs = [] for l in f:", "csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import", "[duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q = Queue() processes = [] similarityScoresList", "bug2)) 
valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of", "bugId in similarityList: if bugId not in bugIdsOfValidation: continue if bugId in listOfDuplicates:", "can't import torch without allocating a GPU in Cedar cluster. from experiments.duplicate_bug_detection_deep_learning import", "listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity score of duplicate bug with each", "remaining != 0: additional = 1 remaining -= 1 else: additional = 0", "descending order the bugs by probability of being duplicate similarityList = sorted(bugScores, key=lambda", "getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as F from util.torch_util import softmaxPrediction, getVariable", "Prepare data to prediction validationPairs = [] targets = [] bugIdsOfValidation = set()", "time() logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating", "was too slow. import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model using %d", "{} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag of words representation for each", "= similarityScoresList.pop(0) else: similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair", "the values of k to be used in the recall ratio. 
If k", "== 'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda()", "pairIndex in validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True", "time() - c)) c = time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): #", "firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx in range(nBatches): batchStart = batchIdx * batchSize bug1s =", "for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk) for p", "!= 0: logger.info(\"TF-IDF: Process %s processed %d Duplicate bug of %d in %f\"", "type=int, default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S',", "enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId in chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr,", "l in f: l = l.strip() if len(l) == 0: break bug1Id, bug2Id,", "0: additional = 1 remaining -= 1 else: additional = 0 end =", "duplicate bug with each bug if args.model_type == 'tfidf': similarityScores = similarityScoresList.pop(0) else:", "bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug) and create tuples with bug", "shape=(len(ptrs1) - 1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))", "for t in similarityList])) if i % 200 == 0 and i !=", "similarityList])) if i % 200 == 0 and i != 0: logger.info(\"Processed %d", "concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer 
from sklearn.feature_extraction.text import", "similarity and prediction functions if arguments.model == 'classification': similarityFunction = model[1] if args.cuda:", "0 for i in range(n): if remaining != 0: additional = 1 remaining", "indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1, matrix2) for i in", "the recall ratio. If k is empty list so recall rate \" \"is", "len(processes): break else: similarityScoresList[id] = scoreList except Empty as e: pass logger.info( \"Total", "help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', ) logger", "similarityListByDuplicate = [] if args.model_type == 'tfidf': # Load Model global vectorByBug vectorByBug", "- c)) c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize =", "additional = 0 end = begin + chunkSize + additional chunks.append(l[begin:end]) begin =", "set() duplicateByBugId = {} pairs = [] for l in f: l =", "> args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l) / n) remaining = len(l)", "the bugs by probability of being duplicate similarityList = sorted(bugScores, key=lambda x: x[1],", "bugIds.add(bug2Id) if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) == 0:", "validations: logger.info(\"Calculating metrics to a validation with proportion: %d\" % validation['k']) valitionBugIds =", "vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag of words representation", "1, nbDim)) pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score =", "from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f = open(filePath, 'r') bugIds = set()", "total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \" % (k, rate, hit, total)) # 
Calculate", "batchSize = 8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores =", "i % 200 == 0 and i != 0: logger.info(\"Processed %d Duplicate bug", "time import time import numpy as np import resource from scipy.sparse import csr_matrix", "batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if arguments.model", "label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List)", "# Set the similarity and prediction functions if arguments.model == 'classification': similarityFunction =", "[(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug] #", "parser = argparse.ArgumentParser() parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values of k to", "of being duplicate similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for", "0: logger.info(\"Processed %d Duplicate bug of %d in %f\" % (i, len(duplicateByBugId), time()", "ptrs1 = [0] data2 = [] indices2 = [] ptrs2 = [0] for", "math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1] for", "score in zip(bugIds, similarityScores) if bugId != duplicateBug] # Sort in descending order", "without allocating a GPU in Cedar cluster. 
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL,", "not in bugIdsOfValidation: continue if bugId in listOfDuplicates: pos = cur + 1", "labels: %d\" % validation['k']) if args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs) else: predictions", "validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1] = True valitionBugIds[bug2] =", "default=8, help=\"\") parser.add_argument('--cuda', action=\"store_true\", help=\"enable cuda.\") logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', )", "= getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if arguments.model == 'retrieval': predictionInput = [bug1s,", "data2 = [] indices2 = [] ptrs2 = [0] for otherBug in bugIds[batchStart:", "== -1: # The process send a tuple (-1,None) when it is ending", "bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches):", "logger.info( \"Total time to calculate cosine similarity of %d duplicate bugs: %s \"", "% (args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems]", "bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2", "ptrs2 = [0] for otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1))", "k, hit in zip(recallKs, hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t k=%d: %.3f", "i % 20 == 0 and i != 0: logger.info(\"TF-IDF: Process %s processed", "ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for k, v in dict.items(): 
setattr(self, k,", "tuples with bug id and its similarity score. bugScores = [(bugId, score) for", "+= 1 if cur >= biggestKValue: break for idx, k in enumerate(recallKs): if", "in similarityList: if bugId not in bugIdsOfValidation: continue if bugId in listOfDuplicates: pos", "c)) c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024", "def __init__(self, dict): for k, v in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById,", "[] duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q = Queue() processes", "for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts)", "in similarityList])) if i % 200 == 0 and i != 0: logger.info(\"Processed", "for i in range(score.shape[0]): similarityScores.append(score[i][i]) return similarityScores def predictTFIDF(pairs): batchSize = 8192 nPairs", "Remove pair (duplicateBug, duplicateBug) and create tuples with bug id and its similarity", "Results:\") for k, hit in zip(recallKs, hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t", "%d duplicate bugs. 
Start idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for i, db", "duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if len(duplicateBug2List) == 0: duplicateByBugId[bug2Id]", "if len(duplicateBug1List) == 0: duplicateByBugId[bug1Id] = duplicateBug1List duplicateBug1List.add(bug2Id) duplicateBug2List = duplicateByBugId.get(bug2Id, set()) if", "parser.add_argument('--recall_ratio_k', nargs='+', required=True, help=\"list of the values of k to be used in", "= [0] data2 = [] indices2 = [] ptrs2 = [0] for bug1,", "pass logger.info( \"Total time to calculate cosine similarity of %d duplicate bugs: %s", "pair labels: %d\" % validation['k']) if args.model_type == 'tfidf': predictions = predictTFIDF(validationPairs) else:", "i, len(duplicateBugs), time() - c)) c = time() q.put([-1, None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug,", "as np import resource from scipy.sparse import csr_matrix from sklearn.metrics import accuracy_score, precision_recall_fscore_support", "None]) def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds): batchSize = 1024 nPairs = len(bugIds) nBatches =", "args.model_type == 'tfidf': # Load Model global vectorByBug vectorByBug = {} tfIdfVectorizer =", "import time import numpy as np import resource from scipy.sparse import csr_matrix from", "scoreList = q.get() if id == -1: # The process send a tuple", "by probability of being duplicate similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug,", "Generate bag of words representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId", "l.strip() if len(l) == 0: break bug1Id, bug2Id, label = l.split(',') label =", "Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug, similarityList in", "args.cuda) if args.cuda: model.cuda() 
encoderContainer.cuda() # Generate the embedding for each bug logger.info(\"Generating", "Duplicate bug of %d in %f\" % ( os.getpid(), i, len(duplicateBugs), time() -", "Calculate Recall Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating Recall Rate\") for duplicateBug,", "_ = precision_recall_fscore_support(targets, predictions) logger.info(\"Accuracy: %.3f (%d/%d)\" % (acc * 100, accum, len(targets)))", "args.cuda) # Remove pair (duplicateBug, duplicateBug) and create tuples with bug id and", "vectorByBug vectorByBug = {} tfIdfVectorizer = pickle.load(open(args.model, 'rb')) # Generate bag of words", "valitionBugIds[bug1] = True valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate", "if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model == 'retrieval': similarityFunction = F.cosine_similarity", "= [bug1s, bug2s] elif arguments.model == 'classification': predictionInput = model[1](bug1s, bug2s) output =", "in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 =", "'tfidf': # Calculating the score for tf-idf. 
We had to parallel this step", "calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch import torch.nn.functional as F from util.torch_util", "import numpy as np import resource from scipy.sparse import csr_matrix from sklearn.metrics import", "logging.getLogger() args = parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k) for k in", "% (i, len(duplicateByBugId), time() - c)) c = time() # For each different", "multiprocessing.sharedctypes import RawArray from queue import Empty from time import time import numpy", "- 1, nbDim)) matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score", "== 0 and i != 0: logger.info(\"Processed %d Duplicate bug of %d in", "in listOfDuplicates: pos = cur + 1 break cur += 1 if cur", "0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for bugId", "help=\"list of the values of k to be used in the recall ratio.", "'retrieval': similarityFunction = F.cosine_similarity predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda) if args.cuda: model.cuda() encoderContainer.cuda() #", "tf-idf model using %d processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug", "= [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems] q = Queue() processes = []", "= 0 for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)): arr = RawArray(c_ulong, [int(bugId) for", "1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores", "getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set the similarity and prediction functions", "nbDim = bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1", "% 200 == 0 and i != 0: logger.info(\"Processed %d Duplicate bug of", "/ total logger.info(\"\\t\\t k=%d: 
%.3f (%d/%d) \" % (k, rate, hit, total)) #", "= predictDeepLearningModel(bugEmbeddingsById, validationPairs) # Calculate Recall Rate hitsPerRateK = [0] * len(recallKs) logger.info(\"Calculating", "nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1 = vectorByBug[duplicateBug] similarityScores = [] nbDim =", "valitionBugIds[bug2] = True bugIdsOfValidation.add(bug1) bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of", "logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100, decimals=3)), list(np.around(recall * 100, decimals=3)), list(np.around(f1 * 100,", "* batchSize data1 = [] indices1 = [] ptrs1 = [0] data2 =", "= getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set", "each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds] vectors = tfIdfVectorizer.transform(texts) for", "and prediction functions if arguments.model == 'classification': similarityFunction = model[1] if args.cuda: similarityFunction.cuda()", "similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf': # Calculating the score", "the recall rate and the precision, recall, accuracy recallKs = sorted([int(k) for k", "similarityScoresList[id] = scoreList except Empty as e: pass logger.info( \"Total time to calculate", "= predictionFunction(predictionInput).data.cpu().numpy() for pr in output: if isinstance(pr, (np.float32, np.uint8)): predictions.append(pr) else: predictions.append(pr[-1])", "the similarity score of duplicate bug with each bug if args.model_type == 'tfidf':", "cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer", "Model and DataHandlers arguments = Obj({ 'load': 
args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size':", "from data.dataset import BugDataExtractor # Load Model and DataHandlers arguments = Obj({ 'load':", "0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def", "label = l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label", "len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores = [] for batchIdx in range(nBatches):", "%.3f (%d/%d)\" % (acc * 100, accum, len(targets))) logger.info(\"Precision: {}\\tRecall: {}\\tF1:{}\".format(list(np.around(prec * 100,", "bugs by probability of being duplicate similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True)", "= time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate the similarity score", "v in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions", "ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[bug2] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) nbDim = vectorByBug[bug1].shape[1] pairBug1 = csr_matrix((data1,", "listOfDuplicates in dupDictItems] q = Queue() processes = [] similarityScoresList = [0] *", "of %d in %f\" % ( os.getpid(), i, len(duplicateBugs), time() - c)) c", "bug2Id, label = l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if", "in bugIdsOfValidation: continue if bugId in listOfDuplicates: pos = cur + 1 break", "c = time() logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items() if args.model_type == 'tfidf':", "pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2)", "pairs, 
validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset) bugIds = list(bugIds)", "arguments.model == 'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model == 'classification': predictionInput =", "similarityFunction, bugEmbeddingsById, bugIds, args.cuda) # Remove pair (duplicateBug, duplicateBug) and create tuples with", "in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions =", "recall rate and the precision, recall, accuracy recallKs = sorted([int(k) for k in", "vectorByBug[bugId] = vectors[idx] else: # We can't import torch without allocating a GPU", "for idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else: # We can't import", "set() for pairIndex in validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2)) valitionBugIds[bug1]", "and the precision, recall, accuracy recallKs = sorted([int(k) for k in args.recall_ratio_k]) biggestKValue", "[] ptrs2 = [0] for otherBug in bugIds[batchStart: batchStart + batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices)", "Duplicate bug of %d in %f\" % (i, len(duplicateByBugId), time() - c)) c", "RawArray from queue import Empty from time import time import numpy as np", "= open(filePath, 'r') bugIds = set() duplicateByBugId = {} pairs = [] for", "db in enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20", "idx: %d\" % (os.getpid(), len(duplicateBugs), start)) for i, db in enumerate(duplicateBugs): q.put([start +", ") logger = logging.getLogger() args = parser.parse_args() print(args) global bugIds args.recall_ratio_k = [int(k)", "bag of words representation for each bug texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in", "chunkSize = int(len(l) / n) remaining = len(l) % n 
chunks = []", "(-1,None) when it is ending its work. count += 1 # Break the", "else: additional = 0 end = begin + chunkSize + additional chunks.append(l[begin:end]) begin", "% ( os.getpid(), i, len(duplicateBugs), time() - c)) c = time() q.put([-1, None])", "hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for k, hit in zip(recallKs, hitsPerRateK): rate", "for bug1, bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices)", "== 'tfidf': # Calculating the score for tf-idf. We had to parallel this", "data1 = [] indices1 = [] ptrs1 = [0] data2 = [] indices2", "for k, v in dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize =", "bug1, bug2 in pairs[batchStart: batchStart + batchSize]: bugEmbedding1 = vectorByBug[bug1] data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1))", "enumerate(recallKs): if k < pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for", "processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk) for p in processes: p.start() count", "from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from", "hit in zip(recallKs, hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d)", "continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate Results:\") for k, hit in zip(recallKs, hitsPerRateK):", "duplicateBug, similarityList in similarityListByDuplicate: pos = biggestKValue + 1 cur = 0 listOfDuplicates", "len(duplicateByBugId), time() - c)) c = time() # For each different proportion, we", "bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set()) if len(duplicateBug1List) 
==", "v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions = [] nBatches = math.ceil(float(len(validationPairs))", "TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath): f", "bug2s] elif arguments.model == 'classification': predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy() for", "is empty list so recall rate \" \"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type',", "bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId !=", "import accuracy_score, precision_recall_fscore_support from sklearn.metrics.pairwise import cosine_similarity from data.bug_report_database import BugReportDatabase from data.preprocessing", "int(len(l) / n) remaining = len(l) % n chunks = [] begin =", "because the sequential version was too slow. 
import multiprocessing logger.info(\"Calculating cosine similarity of", "DataHandlers arguments = Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif':", "= set() duplicateByBugId = {} pairs = [] for l in f: l", "+ batchSize]: data1.extend(bugEmbedding1.data) indices1.extend(bugEmbedding1.indices) ptrs1.append(len(indices1)) bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 =", "== 0 and i != 0: logger.info(\"TF-IDF: Process %s processed %d Duplicate bug", "similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True) similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList]))", "recall rate \" \"is not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\")", "t in similarityList])) if i % 200 == 0 and i != 0:", "= 0 end = begin + chunkSize + additional chunks.append(l[begin:end]) begin = end", "duplicateByBugId = {} pairs = [] for l in f: l = l.strip()", "recallKs = sorted([int(k) for k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId)", "[bug1s, bug2s] elif arguments.model == 'classification': predictionInput = model[1](bug1s, bug2s) output = predictionFunction(predictionInput).data.cpu().numpy()", "zip(recallKs, hitsPerRateK): rate = float(hit) / total logger.info(\"\\t\\t k=%d: %.3f (%d/%d) \" %", "= cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def", "not calculated\") parser.add_argument('--model', help=\"model\") parser.add_argument('--model_type', help=\"model type\") parser.add_argument('--bug_dataset', help=\"\") parser.add_argument('--input', required=True) 
parser.add_argument('--retrieval_threshold', type=float,", "batchSize data1 = [] indices1 = [] ptrs1 = [0] data2 = []", "count += 1 # Break the loop when all processes were terminated if", "from multiprocessing import Array, Queue from multiprocessing.sharedctypes import RawArray from queue import Empty", "getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval() model.eval() # Set the", "Sort in descending order the bugs by probability of being duplicate similarityList =", "similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l) / n)", "vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))", "multiprocessing import Array, Queue from multiprocessing.sharedctypes import RawArray from queue import Empty from", "and create tuples with bug id and its similarity score. 
bugScores = [(bugId,", "we calculate the recall rate and the precision, recall, accuracy recallKs = sorted([int(k)", "== len(processes): break else: similarityScoresList[id] = scoreList except Empty as e: pass logger.info(", "break bug1Id, bug2Id, label = l.split(',') label = int(label) pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id)", "math.ceil(float(nPairs) / batchSize) similarityScores = [] for batchIdx in range(nBatches): batchStart = batchIdx", "matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(matrix1, matrix2)", "if arguments.model == 'retrieval': predictionInput = [bug1s, bug2s] elif arguments.model == 'classification': predictionInput", "duplicate pairs recommend list c = time() logger.info(\"Calculating similarity scores\") dupDictItems = duplicateByBugId.items()", "in %f\" % (i, len(duplicateByBugId), time() - c)) c = time() # For", "model using %d processes\" % (args.nb_processes)) funcArgs = [] duplicateBugs = [duplicateBug for", "in validations: logger.info(\"Calculating metrics to a validation with proportion: %d\" % validation['k']) valitionBugIds", "f = open(filePath, 'r') bugIds = set() duplicateByBugId = {} pairs = []", "score for tf-idf. 
We had to parallel this step because the sequential version", "%d in %f\" % ( os.getpid(), i, len(duplicateBugs), time() - c)) c =", "= set() for pairIndex in validation['indexes']: bug1, bug2, label = pairs[pairIndex] validationPairs.append((bug1, bug2))", "range(nBatches): batchStart = batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda)", "for k in args.recall_ratio_k]) biggestKValue = recallKs[-1] total = len(duplicateByBugId) for validation in", "logging.getLogger() c = time() logger.info( \"Process %s started to compute the similarity for", "time() logger.info( \"Process %s started to compute the similarity for %d duplicate bugs.", "import Empty from time import time import numpy as np import resource from", "[] ptrs1 = [0] data2 = [] indices2 = [] ptrs2 = [0]", ">= biggestKValue: break for idx, k in enumerate(recallKs): if k < pos: continue", "p.start() count = 0 while True: try: id, scoreList = q.get() if id", "in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int) def chunks(l, n): chunkSize = int(len(l)", "%d\" % ( np.count_nonzero(np.asarray(targets)), len(targets))) logger.debug(\"Amount of bugs: %d\" % (len(bugIdsOfValidation))) logger.info(\"Predicting pair", "def chunks(l, n): chunkSize = int(len(l) / n) remaining = len(l) % n", "queue import Empty from time import time import numpy as np import resource", "lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments) encoderContainer.eval()", "import ujson import sys import math from ctypes import c_ulong from multiprocessing import", "bugEmbedding2 = vectorByBug[otherBug] data2.extend(bugEmbedding2.data) indices2.extend(bugEmbedding2.indices) ptrs2.append(len(indices2)) matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) -", "score = 
cosine_similarity(pairBug1, pairBug2) for i in range(score.shape[0]): similarityScores.append(score[i][i]) return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int)", "+ i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20 == 0 and i", "bugIds] vectors = tfIdfVectorizer.transform(texts) for idx, bugId in enumerate(bugIds): vectorByBug[bugId] = vectors[idx] else:", "recall ratio. If k is empty list so recall rate \" \"is not", "batchIdx * batchSize bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart:", "= bugEmbedding1.shape[1] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1 =", "indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim)) score = cosine_similarity(pairBug1, pairBug2) for i in", "0: logger.info(\"TF-IDF: Process %s processed %d Duplicate bug of %d in %f\" %", "Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers,", "= 8192 nPairs = len(pairs) nBatches = math.ceil(float(nPairs) / batchSize) similarityScores = []", "arguments = Obj({ 'load': args.model, 'cuda': args.cuda, 'summary_bidirectional': False, 'classifier_hidden_size': 300, 'classifier_mul_dif': True", "'classifier_hidden_size': 300, 'classifier_mul_dif': True }) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model", "pairs.append((bug1Id, bug2Id, label)) bugIds.add(bug1Id) bugIds.add(bug2Id) if label == 1: duplicateBug1List = duplicateByBugId.get(bug1Id, set())", "in the recall ratio. 
If k is empty list so recall rate \"", "k in enumerate(recallKs): if k < pos: continue hitsPerRateK[idx] += 1 logger.info(\"Recall Rate", "import BugReportDatabase from data.preprocessing import concatenateSummaryAndDescription from experiments.sparse_vector import TokenizerStemmer from nltk import", "dict.items(): setattr(self, k, v) def predictDeepLearningModel(bugEmbeddingsById, validationPairs): batchSize = 1024 predictions = []", "args.recall_ratio_k] bugIds, duplicateByBugId, pairs, validations = loadData(args.input) biggestValidation = validations[-1] bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset)", "[] secondBugPairs = [] for bug1, bug2 in validationPairs: firstBugPairs.append(bugEmbeddingsById[bug1]) secondBugPairs.append(bugEmbeddingsById[bug2]) for batchIdx", "id and its similarity score. bugScores = [(bugId, score) for bugId, score in", "enumerate(duplicateBugs): q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)]) if i % 20 == 0", "c = time() logger.info( \"Process %s started to compute the similarity for %d", "id == -1: # The process send a tuple (-1,None) when it is", "\"Process %s started to compute the similarity for %d duplicate bugs. Start idx:", "for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug] # Sort in", "import TokenizerStemmer from nltk import TreebankWordTokenizer, SnowballStemmer from sklearn.feature_extraction.text import TfidfVectorizer def loadData(filePath):", "= {} # Prepare data to prediction validationPairs = [] targets = []", "bugIdsOfValidation.add(bug2) targets.append(max(0, label)) logger.debug(\"Amount of duplicate pairs: %d\\tAmount of pairs: %d\" % (", "= model[1] if args.cuda: similarityFunction.cuda() predictionFunction = softmaxPrediction elif arguments.model == 'retrieval': similarityFunction", "bugIds): batchSize = 1024 nPairs = len(bugIds) nBatches = math.ceil(float(nPairs) / batchSize) bugEmbedding1", "cluster. 
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \\ calculateSimilarityScoresDL, \\ CosinePrediction, getDataHandlerLexiconEmb, getModel import torch", "score) for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug] # Sort", "time to calculate cosine similarity of %d duplicate bugs: %s \" % (len(dupDictItems),", "had to parallel this step because the sequential version was too slow. import", "batchSize]), args.cuda) bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda) if arguments.model == 'retrieval':", "[] begin = 0 for i in range(n): if remaining != 0: additional", "e: pass logger.info( \"Total time to calculate cosine similarity of %d duplicate bugs:", "chunk]) processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q))) startToWrite += len(chunk) for p in processes: p.start()", "%d\" % validation['k']) valitionBugIds = {} # Prepare data to prediction validationPairs =", "import multiprocessing logger.info(\"Calculating cosine similarity of tf-idf model using %d processes\" % (args.nb_processes))", "- c)) c = time() for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems): # Calculate", "[] for batchIdx in range(nBatches): batchStart = batchIdx * batchSize data1 = []", "import c_ulong from multiprocessing import Array, Queue from multiprocessing.sharedctypes import RawArray from queue", "duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations'] class Obj(object): def __init__(self, dict): for k,", "True }) dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments) encoderContainer, model = getModel(dataHandlers, lexicons,", "else: predictions.append(pr[-1]) return predictions def parallel(start, duplicateBugs, q): logger = logging.getLogger() c =", "nargs='+', required=True, help=\"list of the values of k to be used in the", "set()) if 
len(duplicateBug2List) == 0: duplicateByBugId[bug2Id] = duplicateBug2List duplicateBug2List.add(bug1Id) return bugIds, duplicateByBugId, pairs," ]
[ "instance1 (maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(),", "difference in output is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 =", "output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should be no more than float32 rounding", "feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval yv1", "+ y target = ((dist < 50) & (s > 20)) | ((x+2*y)", "points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M =", "= sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2 #", "1)).astype(float) dist = np.sqrt(x**2 + y**2) s = x + y target =", "50], y: [0, 50] # such that the difference in output is maximized", "50] # such that the difference in output is maximized opt = veritas.Optimizer(minimize=at,", "\"(should be no more than float32 rounding error)\") # Look in a 100×100", "\"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, } bst =", "(minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(),", "one in box x: [0, 50], y: [0, 50] # such that the", "veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1", "sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M", "# max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 # Use VERITAS to find", "fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 # Use VERITAS to find the two", "[25, 75], y: [50, 80] # - one in box x: [0, 50],", "params = { 
\"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\",", "\"train\")]) features = [\"x\", \"y\"] feat2id = {f : i for i, f", "the dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X =", "= np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for j, yv in enumerate(range(100)):", "0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1,", "[ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50),", "= [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0,", "# Use VERITAS to find the two output configurations # - one in", "2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float)", "difference in output is maximized # This time, share attribute x between the", "opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized)", "xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval yv1 = sum(intervals[1][1])/2", "4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, } bst", "y: [0, 50] # such that the difference in output is maximized opt", "one in box x: [25, 75], y: [50, 80] # - one in", "ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30,", "{f : i for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score =", "instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num reachable leafs after prune\",", "= plt.subplots() m, M = 
abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0,", "i, xv in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv,", "color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for p in points[:3]: # 3", "veritas import veritas.xgb # Generate a random dataset np.random.seed(14) N = 2000 x", "in output is maximized # This time, share attribute x between the two", "0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for p", "such that the difference in output is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(),", "10, [(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id = {f : i for", "max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25,", "random dataset np.random.seed(14) N = 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y", "ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1)", "# min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE", "after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for", "= abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False,", "Use VERITAS to find the two output configurations # - one in box", "cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS to find the", "Look in a 100×100 grid at the values 
produced by XGBoost Xv =", "y: [50, 80] # - one in box x: [0, 50], y: [0,", "interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0, yv0,", "XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at -", "6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\")", "ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS to", "enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1)", "grid at the values produced by XGBoost Xv = np.zeros((100*100, 2)) for i,", "interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1 =", "plt.show() # EXAMPLE 1 # Use VERITAS to find the two output configurations", "] print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0", "prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num reachable leafs after", "of first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature", "- one in box x: [0, 50], y: [0, 50] # such that", "= np.sqrt(x**2 + y**2) s = x + y target = ((dist <", "1) # prune instance1 (maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000)", "such that the difference in output is maximized # This time, share attribute", "ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS to find the two output", "in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs", "[xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16,", "color=\"red\")) for p in points[:3]: # 3 best 
only l, = ax.plot([p[0], p[2]],", "for p in points[:3]: # 3 best only l, = ax.plot([p[0], p[2]], [p[1],", "j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True)", "2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5,", "sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2 # instance0:", "# Check whether our \"AddTree\"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw", "import xgboost as xgb import numpy as np import matplotlib.pyplot as plt from", "fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for p in points[:3]: #", "'.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x, y), axis=1) #", "match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0,", "target = ((dist < 50) & (s > 20)) | ((x+2*y) > 200)", "| ((x+2*y) > 200) # Plot the dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target],", "middle of second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first", "0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5,", "#plt.show() X = np.concatenate((x, y), axis=1) # Train a model using XGBoost xtrain", "dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x,", "using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\": 0.5, \"max_depth\":", "xgb import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle", "sum(intervals[1][0])/2 # instance1: middle 
of first feature interval yv1 = sum(intervals[1][1])/2 # instance1:", "[p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax)", "50), 50, 30, fill=False, color=\"red\")) for p in points[:3]: # 3 best only", "> 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\",", "# instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle", "id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first", "points = [] for sol in opt.solutions(): # convert Solution object to list", "maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 =", "the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25,", "prune instance1 (maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(),", "first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval", "opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80),", "#print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im =", "bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1))", "the difference in output is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0", "axis=1) # Train a model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params", "color=\"darkblue\", 
alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target],", "find the two output configurations # - one in box x: [25, 75],", "50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0)", "y), axis=1) # Train a model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None)", "ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im,", "100×100 grid at the values produced by XGBoost Xv = np.zeros((100*100, 2)) for", "as xgb import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import", "ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for", "xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax =", "ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target],", "[0, 50] # such that the difference in output is maximized opt =", "a model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\":", "the difference in output is maximized # This time, share attribute x between", "sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m,", "np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should be no", "plt from matplotlib.patches import Rectangle import veritas import veritas.xgb # Generate a random", "matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 
80), ] box1 = [", "\"nthread\": 1, } bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features = [\"x\",", "np.sqrt(x**2 + y**2) s = x + y target = ((dist < 50)", "opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in", "the values produced by XGBoost Xv = np.zeros((100*100, 2)) for i, xv in", "instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of", "best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) #", "be no more than float32 rounding error)\") # Look in a 100×100 grid", "# Look in a 100×100 grid at the values produced by XGBoost Xv", "neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 #", "np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import veritas import veritas.xgb", "attribute x between the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0", "+ y**2) s = x + y target = ((dist < 50) &", "p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max", "print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized)", "2)) for i, xv in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2]", "np.random.seed(14) N = 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0,", "pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target],", "size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2)", "= 
[xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2,", "y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\") im =", "bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id =", "by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle", "[p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 # Use", "opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num", "XGBoost Xv = np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for j, yv", "= [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs before prune\", opt.g0.num_vertices(),", "} bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id", "= veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ]", "color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() #", "= np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])),", "a 100×100 grid at the values produced by XGBoost Xv = np.zeros((100*100, 2))", "50, 30, fill=False, color=\"red\")) for p in points[:3]: # 3 best only l,", "pred_raw), \"(should be no more than float32 rounding error)\") # Look in a", "= {f : i for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score", "maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50,", "pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', 
color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im,", "the two output configurations # - one in box x: [25, 75], y:", "middle of first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second", "reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1,", "opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1", "= xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\",", "color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\",", "before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) #", "xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id = {f :", "np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j,", "to list of intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0", "figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true", "ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\")", "# convert Solution object to list of intervals indexes by feature id intervals", "y: [0, 50] # such that the difference in output is maximized #", "xtrain, 10, [(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id = {f : i", "in box x: [25, 75], y: [50, 80] # - one in box", "'.', color=\"red\") #plt.show() X 
= np.concatenate((x, y), axis=1) # Train a model using", "two output configurations # - one in box x: [25, 75], y: [50,", "convert Solution object to list of intervals indexes by feature id intervals =", "maximized # This time, share attribute x between the two instances opt =", "# instance1: middle of first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle", "= ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS", "[] for sol in opt.solutions(): # convert Solution object to list of intervals", "= ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50,", "EXAMPLE 1 # Use VERITAS to find the two output configurations # -", "features = [\"x\", \"y\"] feat2id = {f : i for i, f in", "(pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.',", "xv in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv]", "<gh_stars>1-10 import xgboost as xgb import numpy as np import matplotlib.pyplot as plt", "= bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should be no more than", "= veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ]", "Solution object to list of intervals indexes by feature id intervals = opt.solution_to_intervals(sol,", "middle of first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second", "x: [0, 50], y: [0, 50] # such that the difference in output", "= 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N,", "Xv = np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for j, yv in", 
"a random dataset np.random.seed(14) N = 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float)", "at.base_score = 0.5 # Check whether our \"AddTree\"'s predictions and XGBoost's match pred_raw_at", "as plt from matplotlib.patches import Rectangle import veritas import veritas.xgb # Generate a", "'x', color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() #", "reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points =", "ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\")", "bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should be no more than float32", "indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0:", "This time, share attribute x between the two instances opt = veritas.Optimizer(minimize=at, maximize=at,", "veritas.xgb # Generate a random dataset np.random.seed(14) N = 2000 x = np.random.randint(0,", "in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0,", "# instance1: middle of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1])", "veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1", "fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\",", "Train a model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params = {", "20)) | ((x+2*y) > 200) # Plot the dataset #plt.plot(x[target], y[target], '.', color=\"blue\")", "import Rectangle import veritas 
import veritas.xgb # Generate a random dataset np.random.seed(14) N", "box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50),", "dataset np.random.seed(14) N = 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y =", "label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\",", "y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x, y), axis=1)", "im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50),", "points[:3]: # 3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]],", "box x: [0, 50], y: [0, 50] # such that the difference in", "{ \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1,", "x: [25, 75], y: [50, 80] # - one in box x: [0,", "(maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps()))", "import veritas import veritas.xgb # Generate a random dataset np.random.seed(14) N = 2000", "middle of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points =", "of first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature", "2 # Use VERITAS to find the two output configurations # - one", "alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\",", "our \"AddTree\"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = 
bst.predict(xtrain, output_margin=True)", "f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether our", "error)\") # Look in a 100×100 grid at the values produced by XGBoost", "= { \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\":", "opt.solutions(): # convert Solution object to list of intervals indexes by feature id", "Rectangle import veritas import veritas.xgb # Generate a random dataset np.random.seed(14) N =", "veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable", "opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions():", "0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1,", "= [\"x\", \"y\"] feat2id = {f : i for i, f in enumerate(features)}", "output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) >", "leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = []", "= np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should be", "'.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target],", "leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1)", "# prune instance1 (maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) 
print((opt.num_solutions(),", "EXAMPLE 2 # Use VERITAS to find the two output configurations # -", "50] # such that the difference in output is maximized # This time,", "x between the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 =", "- pred_raw), \"(should be no more than float32 rounding error)\") # Look in", "veritas.RealDomain(0, 50), ] print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) #", "in box x: [0, 50], y: [0, 50] # such that the difference", "'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T,", "0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num reachable", "second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval", "time, share attribute x between the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]),", "\"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, } bst = xgb.train(params, xtrain,", "instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50,", "100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 +", "yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2", "[(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id = {f : i for i,", "100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2) s = x + y", "is maximized # This time, share attribute x between the two instances opt", "color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x, y), axis=1) # Train", "interval points.append([xv0, yv0, xv1, yv1, 
sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True))", "np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2])", "matplotlib.patches import Rectangle import veritas import veritas.xgb # Generate a random dataset np.random.seed(14)", "error\", max(pred_raw_at - pred_raw), \"(should be no more than float32 rounding error)\") #", "more than float32 rounding error)\") # Look in a 100×100 grid at the", "M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50,", "> 200) # Plot the dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.',", "by XGBoost Xv = np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for j,", "y**2) s = x + y target = ((dist < 50) & (s", "in output is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [", "x + y target = ((dist < 50) & (s > 20)) |", "] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs before", "feat2id = {f : i for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst)", "\"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, } bst = xgb.train(params, xtrain, 10,", "80] # - one in box x: [0, 50], y: [0, 50] #", "label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\")", "label=target, missing=None) params = { \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\",", "VERITAS to find the two output configurations # - one in box x:", "# EXAMPLE 1 # Use VERITAS to find the two output configurations #", "opt = 
veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80),", "max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 # Use VERITAS to find the", "\"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, } bst = xgb.train(params,", "plt.show() # EXAMPLE 2 # Use VERITAS to find the two output configurations", "prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune instance0 (minimized) opt.prune_box(box1, 1) # prune", "ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\",", "1, \"nthread\": 1, } bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features =", "interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval yv1 =", "as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import veritas import", "missing=None) params = { \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\":", "yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots()", "max(pred_raw_at - pred_raw), \"(should be no more than float32 rounding error)\") # Look", "opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions(): # convert Solution object", "1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2) s", "(ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target],", "# instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle", "4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0 =", "in opt.solutions(): # convert Solution object to 
list of intervals indexes by feature", "sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0,", "#plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x, y), axis=1) # Train a", "print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points", "# Generate a random dataset np.random.seed(14) N = 2000 x = np.random.randint(0, 100,", "= 0.5 # Check whether our \"AddTree\"'s predictions and XGBoost's match pred_raw_at =", "= plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.',", "N = 2000 x = np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100,", "produced by XGBoost Xv = np.zeros((100*100, 2)) for i, xv in enumerate(range(100)): for", "= bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred =", "m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50,", "p in points[:3]: # 3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]])", "marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show()", "# 3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\",", "XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\": 0.5, \"max_depth\": 4,", "feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1", "predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\",", "[50, 80] # - one in box x: [0, 50], y: [0, 50]", 
"ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 #", "plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0),", "#plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x, y),", "= ((dist < 50) & (s > 20)) | ((x+2*y) > 200) #", "of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points)", "X = np.concatenate((x, y), axis=1) # Train a model using XGBoost xtrain =", "neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\")", "\"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, } bst = xgb.train(params, xtrain, 10, [(xtrain,", "output configurations # - one in box x: [25, 75], y: [50, 80]", "ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false neg\") im", "= ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]],", "Check whether our \"AddTree\"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw =", "i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether", "l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]],", "np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2) s = x +", "intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature", "im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") 
fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use", "i for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 #", "for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check", ": i for i, f in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5", "p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color())", "= np.concatenate((x, y), axis=1) # Train a model using XGBoost xtrain = xgb.DMatrix(X,", "and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at", "50) & (s > 20)) | ((x+2*y) > 200) # Plot the dataset", "np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2", "y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target],", "label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1", "'.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target],", "opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions(): #", "label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x', color=\"red\", label=\"false", "numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import veritas", "at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check 
whether our \"AddTree\"'s predictions and", "min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2", "color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 # Use VERITAS to", "color=\"red\") #plt.show() X = np.concatenate((x, y), axis=1) # Train a model using XGBoost", "# prune instance0 (minimized) opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num reachable leafs", "maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 =", "75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num", "x = np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist", "s = x + y target = ((dist < 50) & (s >", "configurations # - one in box x: [25, 75], y: [50, 80] #", "200) # Plot the dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\")", "than float32 rounding error)\") # Look in a 100×100 grid at the values", "veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether our \"AddTree\"'s predictions and XGBoost's match", "ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true", "list of intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 =", "second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points)", "0.5 # Check whether our \"AddTree\"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X))", "\"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\":", "Plot the dataset #plt.plot(x[target], 
y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show() X", "= sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2 #", "= sum(intervals[1][0])/2 # instance1: middle of first feature interval yv1 = sum(intervals[1][1])/2 #", "sum(intervals[0][1])/2 # instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2 # instance1:", "instance1: middle of first feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of", "# EXAMPLE 2 # Use VERITAS to find the two output configurations #", "xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\":", "first feature interval yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval", "\"AddTree\"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max", "xgboost as xgb import numpy as np import matplotlib.pyplot as plt from matplotlib.patches", "points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig,", "feature interval yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0,", "yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6))", "= veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether our \"AddTree\"'s predictions and XGBoost's", "opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions(): # convert Solution", "\"hist\", \"seed\": 1, \"nthread\": 1, } bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")])", "[\"x\", \"y\"] feat2id = {f : i for i, f in enumerate(features)} at", "that the difference in output is maximized opt = veritas.Optimizer(minimize=at, 
maximize=at, matches=set(), match_is_reuse=True)", "(s > 20)) | ((x+2*y) > 200) # Plot the dataset #plt.plot(x[target], y[target],", "yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0, yv0, xv1,", "80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs", "y target = ((dist < 50) & (s > 20)) | ((x+2*y) >", "import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import veritas import veritas.xgb #", "feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of", "# - one in box x: [0, 50], y: [0, 50] # such", "instance1: middle of second feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points", "for sol in opt.solutions(): # convert Solution object to list of intervals indexes", "= x + y target = ((dist < 50) & (s > 20))", "50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for p in", "- one in box x: [25, 75], y: [50, 80] # - one", "pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should be no more", "veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ]", "= [] for sol in opt.solutions(): # convert Solution object to list of", "50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for p in points[:3]:", "to find the two output configurations # - one in box x: [25,", "prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.steps(2000) print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol", "whether our \"AddTree\"'s predictions and XGBoost's match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain,", "y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + 
y**2) s =", "size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2) s = x + y target", "instance0: middle of second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of", "[0, 50] # such that the difference in output is maximized # This", "\"seed\": 1, \"nthread\": 1, } bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features", "enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether our \"AddTree\"'s predictions", "float32 rounding error)\") # Look in a 100×100 grid at the values produced", "& (s > 20)) | ((x+2*y) > 200) # Plot the dataset #plt.plot(x[target],", "object to list of intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4)", "np.concatenate((x, y), axis=1) # Train a model using XGBoost xtrain = xgb.DMatrix(X, label=target,", "marker=\"^\", color=l.get_color()) # max fig.colorbar(im, ax=ax) plt.show() # EXAMPLE 2 # Use VERITAS", "ax=ax) plt.show() # EXAMPLE 2 # Use VERITAS to find the two output", "for i, xv in enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] =", "output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T,", "plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\",", "cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\"))", "box x: [25, 75], y: [50, 80] # - one in box x:", "of second feature interval xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature", "[p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min ax.scatter([p[2]], [p[3]], marker=\"^\", color=l.get_color()) #", "Generate a random dataset np.random.seed(14) N = 2000 x = np.random.randint(0, 100, size=(N,", "\"y\"] feat2id = {f : i for i, f in enumerate(features)} 
at =", "# This time, share attribute x between the two instances opt = veritas.Optimizer(minimize=at,", "only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color()) # min", "((dist < 50) & (s > 20)) | ((x+2*y) > 200) # Plot", "yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig,", "3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]], [p[1]], marker=\"v\", color=l.get_color())", "y[~pred&target], 'x', color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show()", "# Train a model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params =", "opt.num_steps())) points = [] for sol in opt.solutions(): # convert Solution object to", "import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import", "matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75), veritas.RealDomain(50, 80), ] box1 = [", "in points[:3]: # 3 best only l, = ax.plot([p[0], p[2]], [p[1], p[3]]) ax.scatter([p[0]],", "from matplotlib.patches import Rectangle import veritas import veritas.xgb # Generate a random dataset", "yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax", "between the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [", "match pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw),", "= opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval", "fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS to find the two", "at the values produced by XGBoost Xv = np.zeros((100*100, 2)) for i, xv", "# Plot the 
dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target], '.', color=\"red\") #plt.show()", "intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 #", "= xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features = [\"x\", \"y\"] feat2id = {f", "((x+2*y) > 200) # Plot the dataset #plt.plot(x[target], y[target], '.', color=\"blue\") #plt.plot(x[~target], y[~target],", "for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv),", "pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false", "color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x', color=\"blue\", label=\"false pos\") ax0.plot(x[~pred&target], y[~pred&target], 'x',", "\"max_depth\": 4, \"objective\": \"binary:hinge\", \"eval_metric\": \"error\", \"tree_method\": \"hist\", \"seed\": 1, \"nthread\": 1, }", "output is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25,", "30, fill=False, color=\"red\")) for p in points[:3]: # 3 best only l, =", "enumerate(range(100)): for j, yv in enumerate(range(100)): Xv[i*100+j, 0:2] = [xv, yv] vs =", "alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\") ax0.plot(x[pred&~target], y[pred&~target], 'x',", "vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred", "50), ] print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0, 0) # prune", "output is maximized # This time, share attribute x between the two instances", "# such that the difference in output is maximized opt = veritas.Optimizer(minimize=at, 
maximize=at,", "y[~target], '.', color=\"red\") #plt.show() X = np.concatenate((x, y), axis=1) # Train a model", "[ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices())", "print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps())) points = [] for sol in opt.solutions(): # convert", "import veritas.xgb # Generate a random dataset np.random.seed(14) N = 2000 x =", "= np.random.randint(0, 100, size=(N, 1)).astype(float) y = np.random.randint(0, 100, size=(N, 1)).astype(float) dist =", "origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE 1 # Use VERITAS to find", "fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) pred = (pred_raw.reshape((N,1)) > 0.0)", "matplotlib.pyplot as plt from matplotlib.patches import Rectangle import veritas import veritas.xgb # Generate", "opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0", "1, } bst = xgb.train(params, xtrain, 10, [(xtrain, \"train\")]) features = [\"x\", \"y\"]", "= np.random.randint(0, 100, size=(N, 1)).astype(float) dist = np.sqrt(x**2 + y**2) s = x", "box1 = [ veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs before prune\",", "fill=False, color=\"red\")) for p in points[:3]: # 3 best only l, = ax.plot([p[0],", "1 # Use VERITAS to find the two output configurations # - one", "Xv[i*100+j, 0:2] = [xv, yv] vs = bst.predict(xgb.DMatrix(Xv), output_margin=True) fig, (ax0, ax1) =", "origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\")) ax.add_patch(Rectangle((25, 50), 50, 30, fill=False,", "values produced by XGBoost Xv = np.zeros((100*100, 2)) for i, xv in enumerate(range(100)):", "no more than float32 rounding error)\") # Look in a 100×100 grid at", "sol in opt.solutions(): # convert Solution object to list of 
intervals indexes by", "model using XGBoost xtrain = xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\": 0.5,", "print(points) #print(bst.predict(xgb.DMatrix(points), output_margin=True)) fig, ax = plt.subplots() m, M = abs(min(points[:,2])), max(points[:,2]) im", "abs(min(points[:,2])), max(points[:,2]) im = ax.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color=\"blue\"))", "dist = np.sqrt(x**2 + y**2) s = x + y target = ((dist", "= (pred_raw.reshape((N,1)) > 0.0) ax0.plot(x[pred&target], y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target],", "two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75),", "75], y: [50, 80] # - one in box x: [0, 50], y:", "print(\"max error\", max(pred_raw_at - pred_raw), \"(should be no more than float32 rounding error)\")", "rounding error)\") # Look in a 100×100 grid at the values produced by", "is maximized opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True) box0 = [ veritas.RealDomain(25, 75),", "pred_raw_at = np.array(at.predict(X)) pred_raw = bst.predict(xtrain, output_margin=True) print(\"max error\", max(pred_raw_at - pred_raw), \"(should", "[0, 50], y: [0, 50] # such that the difference in output is", "ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color=\"red\")) for p in points[:3]: # 3 best", "xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval yv0 = sum(intervals[0][1])/2", "y[pred&target], '.', color=\"darkblue\", alpha=0.5, label=\"true pos\") ax0.plot(x[~pred&~target], y[~pred&~target], '.', color=\"darkred\", alpha=0.5, label=\"true neg\")", "in enumerate(features)} at = veritas.xgb.addtree_from_xgb_model(bst) at.base_score = 0.5 # Check whether our \"AddTree\"'s", "# - one in box x: [25, 75], y: [50, 80] # -", 
"veritas.RealDomain(0, 50), veritas.RealDomain(0, 50), ] print(\"num reachable leafs before prune\", opt.g0.num_vertices(), opt.g1.num_vertices()) opt.prune_box(box0,", "of intervals indexes by feature id intervals = opt.solution_to_intervals(sol, 4) xv0 = sum(intervals[0][0])/2", "in a 100×100 grid at the values produced by XGBoost Xv = np.zeros((100*100,", "= sum(intervals[1][1])/2 # instance1: middle of second feature interval points.append([xv0, yv0, xv1, yv1,", "< 50) & (s > 20)) | ((x+2*y) > 200) # Plot the", "> 20)) | ((x+2*y) > 200) # Plot the dataset #plt.plot(x[target], y[target], '.',", "color=\"red\", label=\"false neg\") im = ax1.imshow(vs.reshape(100,100).T, origin=\"lower\", cmap=\"Spectral\") fig.colorbar(im, ax=ax1) plt.show() # EXAMPLE", "that the difference in output is maximized # This time, share attribute x", "share attribute x between the two instances opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True)", "feature interval points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1]) points = np.array(points) print(points) #print(bst.predict(xgb.DMatrix(points),", "# such that the difference in output is maximized # This time, share", "opt.prune_box(box1, 1) # prune instance1 (maximized) print(\"num reachable leafs after prune\", opt.g0.num_vertices(), opt.g1.num_vertices())", "xtrain = xgb.DMatrix(X, label=target, missing=None) params = { \"learning_rate\": 0.5, \"max_depth\": 4, \"objective\":" ]
[ "' + i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] +", "'-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0]", "' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"] else: dic[\"info\"]", "+ '\"\\n' text += 'folder_name: \"' + folder_name +'\"\\n' text += '---' with", "'doi: \"' + dic[\"url\"] + '\"\\n' text += 'note: \"' + dic[\"note\"] +", "if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\" for info", "in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1])", "')] for i in range(len(list_author)): if '-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1]", "\"r\") as file: full_text = file.read() articles = full_text.split(\"\\n\\n\") for article in articles:", "Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website", "= ', ' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"]", "open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic = {} for info", "= parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\",", "'folder_name: \"' + folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory, folder_name, \"index.md\"),", "'note: \"' + dic[\"note\"] + '\"\\n' text += 'folder_name: \"' + folder_name +'\"\\n'", "'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text += 'abstract: \"' + dic[\"abstract\"] +", "help=\"Website directory for creating the file arborescence\") 
#-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\",", "+= 'abstract: \"' + dic[\"abstract\"] + '\"\\n' text += 'publication: \"' + dic[\"booktitle\"]", "= [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with", "article in articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))):", "args.directory, \"index.md\"))): text = '---\\n' text += 'title: \"' + dic[\"title\"] + '\"\\n'", "'\"\\n' text += 'info: \"' + dic[\"info\"] + '\"\\n' text += 'doi: \"'", "to create website architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #--", "full_text = file.read() articles = full_text.split(\"\\n\\n\") for article in articles: lines = article.split(\"\\n\")", "= full_text.split(\"\\n\\n\") for article in articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if", "info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\" +", "Read the system arguments listed after the program parser = argparse.ArgumentParser( description=\"\"\"Read a", "\"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as file: full_text =", "\"' + dic[\"abstract\"] + '\"\\n' text += 'publication: \"' + dic[\"booktitle\"] + '\"\\n'", "listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"]", "else: list_author[i][1] = list_author[i][1][0] + \".\" list_author = [i[0] + ', ' +", "\"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as file: full_text = file.read() articles", "folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: 
file.writelines(article) dic = {}", "'publication: \"' + dic[\"booktitle\"] + '\"\\n' text += 'info: \"' + dic[\"info\"] +", "+ dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name))", "\"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for", "dic = {} for info in listtoread: dic[info] = \"\" if \"inproceedings\" in", "file.read() articles = full_text.split(\"\\n\\n\") for article in articles: lines = article.split(\"\\n\") folder_name =", "folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic = {} for info in listtoread: dic[info]", "+ dic[\"info\"] + '\"\\n' text += 'doi: \"' + dic[\"url\"] + '\"\\n' text", "+ list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and ' +", "lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name))", "parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\",", "<reponame>hulecom/hulecom.github.io import argparse import os #-- command line parameters #-- Read the system", "= \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\"", "+ dic[\"note\"] + '\"\\n' text += 'folder_name: \"' + folder_name +'\"\\n' text +=", "\"index.md\"))): text = '---\\n' text += 'title: \"' + dic[\"title\"] + '\"\\n' text", "+ dic[\"url\"] + '\"\\n' text += 'note: \"' + dic[\"note\"] + '\"\\n' text", "line parameters #-- Read the system arguments listed after the program parser =", "+ '\\n' text += 'authors: 
\"' + dic[\"authors\"] + '\"\\n' text += 'publication_types:", "listtoread: dic[info] = \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"]", "'\"\\n' text += 'folder_name: \"' + folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\",", "+ '\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text += 'abstract:", "in dic[\"author\"].split(' and ')] for i in range(len(list_author)): if '-' in list_author[i][1]: prenom", "not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text += 'title: \"' + dic[\"title\"] +", "system arguments listed after the program parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex file", "'\"\\n' text += 'note: \"' + dic[\"note\"] + '\"\\n' text += 'folder_name: \"'", "') for i in dic[\"author\"].split(' and ')] for i in range(len(list_author)): if '-'", "\"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\" for", "if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))):", "+ dic[\"authors\"] + '\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text", "break dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\" list_author = [i.split(',", "\"-01\" list_author = [i.split(', ') for i in dic[\"author\"].split(' and ')] for i", "'\"\\n' text += 'doi: \"' + dic[\"url\"] + '\"\\n' text += 'note: \"'", "= argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create website architecture for Hugo website", "not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", 
args.directory, \"index.md\"))): text", "+ ' and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"]", "list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0] + \".\"", "+ dic[\"abstract\"] + '\"\\n' text += 'publication: \"' + dic[\"booktitle\"] + '\"\\n' text", "os #-- command line parameters #-- Read the system arguments listed after the", "text += 'publication: \"' + dic[\"booktitle\"] + '\"\\n' text += 'info: \"' +", "list_author = [i[0] + ', ' + i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")]", "in range(len(list_author)): if '-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] +", "+= 'date: ' + dic[\"date\"] + '\\n' text += 'authors: \"' + dic[\"authors\"]", "= ', ' + dic[\"volume\"] + ', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"]", "{} for info in listtoread: dic[info] = \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"]", "and dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"] + ', ' + dic[\"pages\"]", "argparse import os #-- command line parameters #-- Read the system arguments listed", "with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic = {} for", "+ dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"] else: dic[\"info\"] =", "+ dic[\"month\"] + \"-01\" list_author = [i.split(', ') for i in dic[\"author\"].split(' and", "+ ', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', ' + dic[\"volume\"]", "dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"] + ',", "articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory,", "dic[\"date\"] + '\\n' text += 
'authors: \"' + dic[\"authors\"] + '\"\\n' text +=", "in listtoread: for line in lines: if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2]", "with open(args.file, \"r\") as file: full_text = file.read() articles = full_text.split(\"\\n\\n\") for article", "= [i[0] + ', ' + i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] =", "' + dic[\"volume\"] + ', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ',", "output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread =", "[i[0] + ', ' + i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**'", "if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text += 'title: \"'", "line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\" list_author =", "'date: ' + dic[\"date\"] + '\\n' text += 'authors: \"' + dic[\"authors\"] +", "parameters #-- Read the system arguments listed after the program parser = argparse.ArgumentParser(", "not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as", "or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text += 'title: \"' + dic[\"title\"]", "dic[\"info\"] = ', ' + dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory,", "\"1\" else: dic[\"publication_types\"] = \"2\" for info in listtoread: for line in lines:", "= prenom[0][0] + \".-\" + prenom[1][0] + \".\" else: list_author[i][1] = list_author[i][1][0] +", "dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"] + ', ' +", "description=\"\"\"Read a bibtex file to 
create website architecture for Hugo website \"\"\" )", "\"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as file: full_text = file.read()", "list_author[i][1][0] + \".\" list_author = [i[0] + ', ' + i[1] for i", "list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0] + \".\" else: list_author[i][1] = list_author[i][1][0]", "list_author[i][1] = list_author[i][1][0] + \".\" list_author = [i[0] + ', ' + i[1]", "dic[\"authors\"] + '\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text +=", "\"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as file:", "= lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name,", "help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\",", "list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1]) +", "architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory", "args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file:", "+'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory, folder_name, \"index.md\"), \"w\") as file: file.write(text)", "= list_author[i][1][0] + \".\" list_author = [i[0] + ', ' + i[1] for", "'title: \"' + dic[\"title\"] + '\"\\n' text += 'date: ' + dic[\"date\"] +", "', '.join(list_author[:-1]) + ' and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"]", "+ 
'\"\\n' text += 'abstract: \"' + dic[\"abstract\"] + '\"\\n' text += 'publication:", "'\\n' text += 'authors: \"' + dic[\"authors\"] + '\"\\n' text += 'publication_types: \"'", "\"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as file: full_text", "text += 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text += 'abstract: \"' +", "in listtoread: dic[info] = \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else:", "file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread = [\"title\",", "#-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread", "= '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and", "', ' + dic[\"volume\"] + ', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] =", "'\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text += 'abstract: \"'", "lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\" for info in listtoread: for", "dic[\"abstract\"] + '\"\\n' text += 'publication: \"' + dic[\"booktitle\"] + '\"\\n' text +=", "bibtex file to create website architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex", "i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ',", "dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if", "+ '\"\\n' text += 'publication: \"' + dic[\"booktitle\"] + '\"\\n' text += 'info:", "#-- working data directory 
parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file arborescence\") #--", "' and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and", "+ prenom[1][0] + \".\" else: list_author[i][1] = list_author[i][1][0] + \".\" list_author = [i[0]", "' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', ' + dic[\"volume\"] elif dic[\"pages\"]:", "folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory, folder_name, \"index.md\"), \"w\") as file:", "+ i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**'", "\"' + dic[\"note\"] + '\"\\n' text += 'folder_name: \"' + folder_name +'\"\\n' text", "if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', '", "working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file arborescence\") #-- output", "dic[\"booktitle\"] + '\"\\n' text += 'info: \"' + dic[\"info\"] + '\"\\n' text +=", "'\"\\n' text += 'publication: \"' + dic[\"booktitle\"] + '\"\\n' text += 'info: \"'", "as file: file.writelines(article) dic = {} for info in listtoread: dic[info] = \"\"", "dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\" list_author = [i.split(', ') for i", "creating the file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\")", "dic[\"info\"] + '\"\\n' text += 'doi: \"' + dic[\"url\"] + '\"\\n' text +=", "folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article)", ") parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", 
help=\"Website directory for creating", "open(args.file, \"r\") as file: full_text = file.read() articles = full_text.split(\"\\n\\n\") for article in", "[i.split(', ') for i in dic[\"author\"].split(' and ')] for i in range(len(list_author)): if", "'**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and '", "' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"]", "for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\",", "text = '---\\n' text += 'title: \"' + dic[\"title\"] + '\"\\n' text +=", "+ dic[\"booktitle\"] + '\"\\n' text += 'info: \"' + dic[\"info\"] + '\"\\n' text", "dic[\"publication_types\"] = \"2\" for info in listtoread: for line in lines: if info", "+ dic[\"volume\"] + ', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', '", "text += 'authors: \"' + dic[\"authors\"] + '\"\\n' text += 'publication_types: \"' +", "\".\" list_author = [i[0] + ', ' + i[1] for i in list_author]", "dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\" list_author = [i.split(', ')", "+ folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory, folder_name, \"index.md\"), \"w\") as", "the program parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create website architecture", "i in dic[\"author\"].split(' and ')] for i in range(len(list_author)): if '-' in list_author[i][1]:", "if '-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" +", "+ ', ' + i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' +", "files\") args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\",", "elif dic[\"volume\"]: dic[\"info\"] = ', ' + 
dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ',", "action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\",", "and ')] for i in range(len(list_author)): if '-' in list_author[i][1]: prenom = list_author[i][1].split('-')", "dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', ' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] =", "+ list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] =", "prenom[0][0] + \".-\" + prenom[1][0] + \".\" else: list_author[i][1] = list_author[i][1][0] + \".\"", "for i in range(len(list_author)): if '-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] =", "as file: full_text = file.read() articles = full_text.split(\"\\n\\n\") for article in articles: lines", "for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] =", "file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ =", "in articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\",", "\"' + dic[\"authors\"] + '\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n'", "dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"]", "for line in lines: if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"]", "+= 'authors: \"' + dic[\"authors\"] + '\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"]", "args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text += 
'title: \"' +", "list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1]) + '", "= article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with", "text += 'title: \"' + dic[\"title\"] + '\"\\n' text += 'date: ' +", "= ', ' + dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))):", "+ \"-\" + dic[\"month\"] + \"-01\" list_author = [i.split(', ') for i in", "text += 'folder_name: \"' + folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory,", "dic[\"info\"] = ', ' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', ' +", "text += 'doi: \"' + dic[\"url\"] + '\"\\n' text += 'note: \"' +", "arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args()", "\"2\" for info in listtoread: for line in lines: if info in line.split(\"=\")[0]:", "dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\",", "listed after the program parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create", "arguments listed after the program parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex file to", "in lines: if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"]", "+= 'title: \"' + dic[\"title\"] + '\"\\n' text += 'date: ' + dic[\"date\"]", "= line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\" list_author", "= dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"] +", "'info: \"' + 
dic[\"info\"] + '\"\\n' text += 'doi: \"' + dic[\"url\"] +", "+= 'publication_types: \"' + dic[\"publication_types\"] + '\"\\n' text += 'abstract: \"' + dic[\"abstract\"]", "= '---\\n' text += 'title: \"' + dic[\"title\"] + '\"\\n' text += 'date:", "info in listtoread: for line in lines: if info in line.split(\"=\")[0]: dic[info] =", "[\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file,", "' + dic[\"date\"] + '\\n' text += 'authors: \"' + dic[\"authors\"] + '\"\\n'", "folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic = {} for info in listtoread:", "\"' + dic[\"info\"] + '\"\\n' text += 'doi: \"' + dic[\"url\"] + '\"\\n'", "args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\",", "i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"]", "+ '\"\\n' text += 'doi: \"' + dic[\"url\"] + '\"\\n' text += 'note:", "program parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create website architecture for", "if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\"", "args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text =", "+= 'doi: \"' + dic[\"url\"] + '\"\\n' text += 'note: \"' + dic[\"note\"]", "help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file", "parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the", "\".-\" + prenom[1][0] + \".\" 
else: list_author[i][1] = list_author[i][1][0] + \".\" list_author =", "+= 'info: \"' + dic[\"info\"] + '\"\\n' text += 'doi: \"' + dic[\"url\"]", "= \"2\" for info in listtoread: for line in lines: if info in", "dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"] else: dic[\"info\"] = ''", "\"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\" for info in", "website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory", "+ dic[\"publication_types\"] + '\"\\n' text += 'abstract: \"' + dic[\"abstract\"] + '\"\\n' text", "+= 'publication: \"' + dic[\"booktitle\"] + '\"\\n' text += 'info: \"' + dic[\"info\"]", "a bibtex file to create website architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\",", "text += 'date: ' + dic[\"date\"] + '\\n' text += 'authors: \"' +", "for i in dic[\"author\"].split(' and ')] for i in range(len(list_author)): if '-' in", "+ \".-\" + prenom[1][0] + \".\" else: list_author[i][1] = list_author[i][1][0] + \".\" list_author", "listtoread: for line in lines: if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break", "\"' + dic[\"booktitle\"] + '\"\\n' text += 'info: \"' + dic[\"info\"] + '\"\\n'", "line in lines: if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] =", "dic[\"info\"] = ', ' + dic[\"volume\"] + ', ' + dic[\"pages\"] elif dic[\"volume\"]:", "+ \".\" list_author = [i[0] + ', ' + i[1] for i in", "\"' + dic[\"url\"] + '\"\\n' text += 'note: \"' + dic[\"note\"] + '\"\\n'", "', ' + i[1] for i in list_author] list_author[list_author.index(\"<NAME>.\")] = '**' + list_author[list_author.index(\"<NAME>.\")]", "argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create website architecture for Hugo website \"\"\"", "dic[\"publication_types\"] + '\"\\n' text += 
'abstract: \"' + dic[\"abstract\"] + '\"\\n' text +=", "range(len(list_author)): if '-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\"", "\".\" else: list_author[i][1] = list_author[i][1][0] + \".\" list_author = [i[0] + ', '", "data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file arborescence\") #-- output file", "prenom[1][0] + \".\" else: list_author[i][1] = list_author[i][1][0] + \".\" list_author = [i[0] +", "file: full_text = file.read() articles = full_text.split(\"\\n\\n\") for article in articles: lines =", "for info in listtoread: for line in lines: if info in line.split(\"=\")[0]: dic[info]", "existing files\") args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\",", "= list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0] + \".\" else: list_author[i][1]", "elif dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"] else: dic[\"info\"] = '' if", "dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', ' +", "list_author = [i.split(', ') for i in dic[\"author\"].split(' and ')] for i in", "'.join(list_author[:-1]) + ' and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if", "\"' + folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory, folder_name, \"index.md\"), \"w\")", "' + dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory,", "+= 'folder_name: \"' + folder_name +'\"\\n' text += '---' with open(os.path.join(\"content\", args.directory, folder_name,", "in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0] +", "lines[0].split(\"{\")[1][:-1] if 
not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"),", "the file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_", "\"w\") as file: file.writelines(article) dic = {} for info in listtoread: dic[info] =", "dic[\"author\"].split(' and ')] for i in range(len(list_author)): if '-' in list_author[i][1]: prenom =", "\"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as", "dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\" for info in listtoread: for line", "\"-\" + dic[\"month\"] + \"-01\" list_author = [i.split(', ') for i in dic[\"author\"].split('", "', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', ' + dic[\"volume\"] elif", "dic[\"url\"] + '\"\\n' text += 'note: \"' + dic[\"note\"] + '\"\\n' text +=", "prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0] + \".\" else:", "parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create website architecture for Hugo", "+ '**' dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and ' + list_author[-1] if", "= ', '.join(list_author[:-1]) + ' and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] =", "article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\",", "+ dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', ' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"]", "command line parameters #-- Read the system arguments listed after the program parser", 
"folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text += 'title:", "text += 'abstract: \"' + dic[\"abstract\"] + '\"\\n' text += 'publication: \"' +", "lines: if info in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] +", "os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text", "parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False,", "dic[\"title\"] + '\"\\n' text += 'date: ' + dic[\"date\"] + '\\n' text +=", "'\"\\n' text += 'abstract: \"' + dic[\"abstract\"] + '\"\\n' text += 'publication: \"'", "+ \".\" else: list_author[i][1] = list_author[i][1][0] + \".\" list_author = [i[0] + ',", "for info in listtoread: dic[info] = \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] =", "text += 'note: \"' + dic[\"note\"] + '\"\\n' text += 'folder_name: \"' +", "if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"] + ', '", "else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite", "os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic", "\"url\", \"abstract\", \"note\"] with open(args.file, \"r\") as file: full_text = file.read() articles =", "+ dic[\"title\"] + '\"\\n' text += 'date: ' + dic[\"date\"] + '\\n' text", "for creating the file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite existing", 
"dic[\"volume\"]: dic[\"info\"] = ', ' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', '", "import os #-- command line parameters #-- Read the system arguments listed after", "\"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\", \"note\"] with open(args.file, \"r\")", "', ' + dic[\"pages\"] else: dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\",", "default=False, action=\"store_true\", help=\"Overwrite existing files\") args,_ = parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\",", "'abstract: \"' + dic[\"abstract\"] + '\"\\n' text += 'publication: \"' + dic[\"booktitle\"] +", "line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"] +", "dic[info] = \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] =", "= {} for info in listtoread: dic[info] = \"\" if \"inproceedings\" in lines[0]:", "'authors: \"' + dic[\"authors\"] + '\"\\n' text += 'publication_types: \"' + dic[\"publication_types\"] +", "\"abstract\", \"note\"] with open(args.file, \"r\") as file: full_text = file.read() articles = full_text.split(\"\\n\\n\")", "'---\\n' text += 'title: \"' + dic[\"title\"] + '\"\\n' text += 'date: '", "dic[\"info\"] = '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or", "+ dic[\"date\"] + '\\n' text += 'authors: \"' + dic[\"authors\"] + '\"\\n' text", "and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]:", "\"' + dic[\"publication_types\"] + '\"\\n' text += 'abstract: \"' + dic[\"abstract\"] + '\"\\n'", "+ '\"\\n' text += 'date: ' + dic[\"date\"] + '\\n' text += 'authors:", "if 
not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\")", "', ' + dic[\"volume\"] elif dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"pages\"] else:", "dic[\"month\"] + \"-01\" list_author = [i.split(', ') for i in dic[\"author\"].split(' and ')]", "for article in articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory,", "'**' dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and ' + list_author[-1] if dic[\"journal\"]:", "folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) with open(os.path.join(\"content\", args.directory,", "dic[\"note\"] + '\"\\n' text += 'folder_name: \"' + folder_name +'\"\\n' text += '---'", "folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n'", "the system arguments listed after the program parser = argparse.ArgumentParser( description=\"\"\"Read a bibtex", "'\"\\n' text += 'date: ' + dic[\"date\"] + '\\n' text += 'authors: \"'", "file.writelines(article) dic = {} for info in listtoread: dic[info] = \"\" if \"inproceedings\"", "else: dic[\"publication_types\"] = \"2\" for info in listtoread: for line in lines: if", "dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\"", "= file.read() articles = full_text.split(\"\\n\\n\") for article in articles: lines = article.split(\"\\n\") folder_name", "args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory, \"index.md\"))): text = '---\\n' text +=", 
"directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\",", "parser.parse_known_args() listtoread = [\"title\", \"year\", \"month\", \"author\", \"booktitle\", \"journal\", \"volume\", \"pages\", \"url\", \"abstract\",", "import argparse import os #-- command line parameters #-- Read the system arguments", "= '' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\",", "articles = full_text.split(\"\\n\\n\") for article in articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1]", "in line.split(\"=\")[0]: dic[info] = line.split(\"{\")[1][:-2] break dic[\"date\"] = dic[\"year\"] + \"-\" + dic[\"month\"]", "\"' + dic[\"title\"] + '\"\\n' text += 'date: ' + dic[\"date\"] + '\\n'", "dic[\"volume\"] + ', ' + dic[\"pages\"] elif dic[\"volume\"]: dic[\"info\"] = ', ' +", "'' if not(os.path.isdir(os.path.join(\"content\", args.directory, folder_name))): os.mkdir(os.path.join(\"content\", args.directory, folder_name)) if args.overwrite or not(os.path.isfile(os.path.join(\"content\", args.directory,", "= dic[\"year\"] + \"-\" + dic[\"month\"] + \"-01\" list_author = [i.split(', ') for", "args.directory, folder_name)) with open(os.path.join(\"content\", args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic =", "\"note\"] with open(args.file, \"r\") as file: full_text = file.read() articles = full_text.split(\"\\n\\n\") for", "args.directory, folder_name, folder_name+\".bib\"), \"w\") as file: file.writelines(article) dic = {} for info in", "+ '\"\\n' text += 'note: \"' + dic[\"note\"] + '\"\\n' text += 'folder_name:", "+ '\"\\n' text += 'info: \"' + dic[\"info\"] + '\"\\n' text += 'doi:", "after the program parser = 
argparse.ArgumentParser( description=\"\"\"Read a bibtex file to create website", "directory for creating the file arborescence\") #-- output file parser.add_argument(\"--overwrite\",\"-O\", default=False, action=\"store_true\", help=\"Overwrite", "#-- Read the system arguments listed after the program parser = argparse.ArgumentParser( description=\"\"\"Read", "file: file.writelines(article) dic = {} for info in listtoread: dic[info] = \"\" if", "dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and ' + list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"]", "list_author[i][1].split('-') list_author[i][1] = prenom[0][0] + \".-\" + prenom[1][0] + \".\" else: list_author[i][1] =", "#-- command line parameters #-- Read the system arguments listed after the program", "info in listtoread: dic[info] = \"\" if \"inproceedings\" in lines[0]: dic[\"publication_types\"] = \"1\"", "+ \"-01\" list_author = [i.split(', ') for i in dic[\"author\"].split(' and ')] for", "in lines[0]: dic[\"publication_types\"] = \"1\" else: dic[\"publication_types\"] = \"2\" for info in listtoread:", "list_author[-1] if dic[\"journal\"]: dic[\"booktitle\"] = dic[\"journal\"] if dic[\"volume\"] and dic[\"pages\"]: dic[\"info\"] = ',", "file\") #-- working data directory parser.add_argument(\"--directory\",\"-D\", help=\"Website directory for creating the file arborescence\")", "dic[\"pages\"]: dic[\"info\"] = ', ' + dic[\"volume\"] + ', ' + dic[\"pages\"] elif", "file to create website architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\")", "create website architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") #-- working", "full_text.split(\"\\n\\n\") for article in articles: lines = article.split(\"\\n\") folder_name = lines[0].split(\"{\")[1][:-1] if not(os.path.isdir(os.path.join(\"content\",", "website architecture for Hugo website \"\"\" ) parser.add_argument(\"--file\",\"-f\", help=\"bibtex file\") 
#-- working data", "= \"1\" else: dic[\"publication_types\"] = \"2\" for info in listtoread: for line in", "text += 'info: \"' + dic[\"info\"] + '\"\\n' text += 'doi: \"' +", "+= 'note: \"' + dic[\"note\"] + '\"\\n' text += 'folder_name: \"' + folder_name", "= [i.split(', ') for i in dic[\"author\"].split(' and ')] for i in range(len(list_author)):", "i in range(len(list_author)): if '-' in list_author[i][1]: prenom = list_author[i][1].split('-') list_author[i][1] = prenom[0][0]", "list_author[list_author.index(\"<NAME>.\")] + '**' dic[\"authors\"] = ', '.join(list_author[:-1]) + ' and ' + list_author[-1]" ]
[ "random import choice, uniform from time import time from p2ner.abstract.scheduler import Scheduler from", "LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests = False):", "#while There are blocks to request while len(requestableBlocks) > 0: #get the block", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "= self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING", "#self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\"", "permissions and # limitations under the License. from twisted.internet import task, reactor from", "def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def", "#print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {} for peer in neighbours:", "to request while len(requestableBlocks) > 0: #get the block with less sources block", "missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {} for peer in", "initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency =", "EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest = {} for peer in requests: if", "peer in requests: if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] =", "this file except in 
compliance with the License. # You may obtain a", "#print self.buffer #push block to output outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def", "#print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b", "if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return", "{} for k in keys: blocksToRequest[k] = [] #take out blocks with only", "self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop()", "#reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours", "messages.retransmitmessage import RetransmitMessage from block import Block EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest", "peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks,", "d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self, req): if not req:", "norequests = False): n = self.overlay.getNeighbours() outID = self.buffer.shift() if not norequests: #send", "produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self, req):", "def initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency", "blocksToRequest[k] = [] #take out blocks with only 1 
source reqBlockList = requestableBlocks.keys()", "self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None,", "task, reactor from twisted.internet.threads import deferToThread from random import choice, uniform from time", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage from", "ANY KIND, either express or implied. # See the License for the specific", "requestOne(requests): blocksToRequest = {} for peer in requests: if len(requests[peer]) > 1: blocksToRequest[peer]", "missingBlocks, neighbours): for bid in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif bid", "in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print", "for b in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer]", "outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print", "tempReq #print 'temp:',tempReq for b in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else:", "Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage", "missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for peer in", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks", "#print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for", "from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage from", "reactor from twisted.internet.threads import deferToThread from random import choice, uniform from time import", "the block with less sources block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1]", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {} for", "There are blocks to request while len(requestableBlocks) > 0: #get the block with", "def shift(self, norequests = False): n = self.overlay.getNeighbours() outID = self.buffer.shift() if not", "blocksToRequest = {} for peer in requests: if len(requests[peer]) > 1: blocksToRequest[peer] =", "= buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b in tempReq: if b", "else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {} for k in", "neighbours: if self.stream.id not in peer.s: print str(self.stream.id), 'in cotinue 1', peer, peer.s", "in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return", "OF ANY KIND, either express or implied. 
# See the License for the", "less sources block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the peer", "peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running = False #print \"STOP", "request while len(requestableBlocks) > 0: #get the block with less sources block =", "= [peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {} for k in keys: blocksToRequest[k]", "bid in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid)", "def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def", "scheduler') self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer", "self.buffer #push block to output outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self):", "#print missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks:", "blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler", "tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b in tempReq: if", "if \"buffer\" not in peer.s[self.stream.id]: print 'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"]", "peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks to request while", "sources block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the peer with", "b in requestableBlocks: 
requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest =", "(min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print", "askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit to", "messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage from block import Block EXPIRE_TIME =", "b in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys", "isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer):", "1', peer, peer.s print neighbours continue if \"buffer\" not in peer.s[self.stream.id]: print 'in", "if not norequests: #send buffers if self.buffer.lpb % self.reqInterval == 0: d =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b in tempReq: if b in requestableBlocks:", "def requestOne(requests): blocksToRequest = {} for peer in requests: if len(requests[peer]) > 1:", "len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks", "= blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency)", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "twisted.internet import task, 
reactor from twisted.internet.threads import deferToThread from random import choice, uniform", "'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit to ',peer,block,fragments b={}", "getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage", "= {} for peer in requests: if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])]", "BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage from block import Block", "requestableBlocks = {} for peer in neighbours: if self.stream.id not in peer.s: print", "far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1]", "import choice, uniform from time import time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "if len(bl) > 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID,", "str(self.stream.id), 'in cotinue 1', peer, peer.s print neighbours continue if \"buffer\" not in", "peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer):", "self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {} for peer", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print 
\"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID,", "blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def", "= False #print \"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) >", "not norequests: #send buffers if self.buffer.lpb % self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\",", "block to output outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running", "self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return", "import task, reactor from twisted.internet.threads import deferToThread from random import choice, uniform from", "blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage())", "are blocks to request while len(requestableBlocks) > 0: #get the block with less", "neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def dd(self, receivingBlocks,", "required by applicable law or agreed to in writing, software # distributed under", "out blocks with only 1 source reqBlockList = requestableBlocks.keys() for b in reqBlockList:", "= min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del 
requestableBlocks[block]", "applicable law or agreed to in writing, software # distributed under the License", "len(requestableBlocks) > 0: #get the block with less sources block = min([ (len(requestableBlocks[x]),x)", "{} for peer in requests: if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else:", "receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def", "deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests):", "n = self.overlay.getNeighbours() outID = self.buffer.shift() if not norequests: #send buffers if self.buffer.lpb", "1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def produceBlock(self): #print \"PRODUCEBLOCK\"", "or agreed to in writing, software # distributed under the License is distributed", "self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests =", "self.buffer.shift() if not norequests: #send buffers if self.buffer.lpb % self.reqInterval == 0: d", "so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block in", "self.running = False return None bid, peer = req #self.log.debug('sending block %d to", "= peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the peer with min(less possible requests,", "#print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running = False", "del requestableBlocks[b] #while There are blocks to request while len(requestableBlocks) > 0: #get", "for x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\",", "def produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self,", "import getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage import", "if not req: self.running = False return None bid, peer = req #self.log.debug('sending", "if peer is None: self.running = False #print \"STOP SERVING\\n\\n\" return None bl", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "tmpBlocksToRequest.keys() blocksToRequest = {} for k in keys: blocksToRequest[k] = [] #take out", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. 
# You may obtain a copy of the License at # #", "self.buffer) if peer is None: self.running = False #print \"STOP SERVING\\n\\n\" return None", "outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe)", "uniform from time import time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer", "[] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift)", "#send buffers if self.buffer.lpb % self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback", "stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks,", "the License. from twisted.internet import task, reactor from twisted.internet.threads import deferToThread from random", "peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest)", "#print \"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID", "is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except:", "compliance with the License. 
# You may obtain a copy of the License", "self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def produceBlock(self):", "Copyright 2012 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0", "x in requestableBlocks])[1] #get the peer with min(less possible requests, less requests so", "= 0 for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval:", "1 source reqBlockList = requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b]) == 1:", "self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to output outdata", "requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b)", "outID = self.buffer.shift() if not norequests: #send buffers if self.buffer.lpb % self.reqInterval ==", "# -*- coding: utf-8 -*- # Copyright 2012 <NAME>, <NAME> # # Licensed", "requests, less requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send,", "self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def", "neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for peer in self.overlay.getNeighbours():", "blockID = choice(bl) 
peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID", "less requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if", "deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self, req): if not req: self.running =", "'in cotinue 1', peer, peer.s print neighbours continue if \"buffer\" not in peer.s[self.stream.id]:", "= 0.5 def requestOne(requests): blocksToRequest = {} for peer in requests: if len(requests[peer])", "from time import time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer from", "self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB", "d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id,", "not use this file except in compliance with the License. 
# You may", "self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running =", "blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self):", "ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit to ',peer,block,fragments b={} b['blockid']=block", "peer.s: print str(self.stream.id), 'in cotinue 1', peer, peer.s print neighbours continue if \"buffer\"", "block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while", "receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {}", "for peer in neighbours: if self.stream.id not in peer.s: print str(self.stream.id), 'in cotinue", "License, Version 2.0 (the \"License\"); # you may not use this file except", "blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {} for peer in neighbours: if self.stream.id", "d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe)", "self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure):", "tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest 
peerWithRequests =", "bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print", "'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks)", "requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {} for k", "missingBlocks, neighbours) def sendRequests(self, requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer,", "return d def sendBlock(self, req): if not req: self.running = False return None", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING", "self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from", "def dd(self, receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks: if bid in receivingBlocks:", "continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer]", "License. 
from twisted.internet import task, reactor from twisted.internet.threads import deferToThread from random import", "requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0 for peer in blocksToRequest: if", "to output outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def", "< self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {} for", "# you may not use this file except in compliance with the License.", "sendBlock(self, req): if not req: self.running = False return None bid, peer =", "peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del", "[] #take out blocks with only 1 source reqBlockList = requestableBlocks.keys() for b", "agreed to in writing, software # distributed under the License is distributed on", "return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print", "time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq", "else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping')", "k in keys: blocksToRequest[k] = [] #take out blocks with only 1 source", "'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours):", "#self.log.debug('%s',self.buffer) #print self.buffer #push block to output outdata = 
self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write)", "(the \"License\"); # you may not use this file except in compliance with", "bid in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest", "RetransmitMessage from block import Block EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest = {}", "if bid in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks", "return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours)", "= self.overlay.getNeighbours() outID = self.buffer.shift() if not norequests: #send buffers if self.buffer.lpb %", "== 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks to", "= False): n = self.overlay.getNeighbours() outID = self.buffer.shift() if not norequests: #send buffers", "'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {} for peer in neighbours: if", "= requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage())", "% self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests)", "= [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages", "# Unless required by applicable law or agreed to in writing, software #", "neighbours): for bid in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif bid <", "while True: #print self.bufferlist peer = 
getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running", "buffers if self.buffer.lpb % self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests,", "self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05),", "by applicable law or agreed to in writing, software # distributed under the", "bid, peer = req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer)", "n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n,", "from messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage from block import Block EXPIRE_TIME", "in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {}", "import Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg import", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage from block", "self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self,", "= requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0]", "blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", 
blocksToRequest peerWithRequests = 0 for peer in blocksToRequest: if len(blocksToRequest[peer]):", "def sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def", "blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks =", "0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print", "requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block", "tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0 for peer in", "not in peer.s[self.stream.id]: print 'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh", "peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer)", "for b in reqBlockList: if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del", "else: blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages = []", "peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer,", "for peer in requests: if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer]", "neighbours) def sendRequests(self, requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, 
requests.get(peer),", "None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to output outdata = self.trafficPipe.call(\"popblockdata\",", "<NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "-*- # Copyright 2012 <NAME>, <NAME> # # Licensed under the Apache License,", "continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq", "file except in compliance with the License. # You may obtain a copy", "self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to output outdata =", "import Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import", "import Block EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest = {} for peer in", "= req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self):", "%s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests = False): n = self.overlay.getNeighbours()", "BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB message to", "governing permissions and # limitations under the License. 
from twisted.internet import task, reactor", "self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks,", "blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self,", "'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b in", "LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests = False): n = self.overlay.getNeighbours() outID", "License for the specific language governing permissions and # limitations under the License.", "specific language governing permissions and # limitations under the License. from twisted.internet import", "from messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage from", "False #print \"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0:", "self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print", "= getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running = False #print \"STOP SERVING\\n\\n\"", "to in writing, software # distributed under the License is distributed on an", "in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys =", "implied. 
# See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "self.buffer.lpb % self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n)", "to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print", "source reqBlockList = requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b]) == 1: peer", "\"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self, req): if not", "%s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist", "the specific language governing permissions and # limitations under the License. from twisted.internet", "peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0,", "self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send,", "not req: self.running = False return None bid, peer = req #self.log.debug('sending block", "or implied. 
# See the License for the specific language governing permissions and", "output outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer):", "except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print", "failure def produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= False return None bid, peer = req #self.log.debug('sending block %d to %s',bid,peer)", "with min(less possible requests, less requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for", "',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit to ',peer,block,fragments b={} b['blockid']=block b['fragments']=fragments self.trafficPipe.call('sendFragments',self,b,peer)", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "[] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running", "in writing, software # distributed under the License is distributed on an \"AS", "0 for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest", "= {} for peer in neighbours: if self.stream.id not in peer.s: 
print str(self.stream.id),", "in requests: if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer]", "utf-8 -*- # Copyright 2012 <NAME>, <NAME> # # Licensed under the Apache", "None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID)", "import deferToThread from random import choice, uniform from time import time from p2ner.abstract.scheduler", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec']", "print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit to ',peer,block,fragments", "self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer)", "0.5 def requestOne(requests): blocksToRequest = {} for peer in requests: if len(requests[peer]) >", "while len(requestableBlocks) > 0: #get the block with less sources block = min([", "with only 1 source reqBlockList = requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b])", "0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] =", "x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print 
\"BLOCKSTOREQUESTSSSS\", blocksToRequest", "buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b in tempReq:", "not in peer.s: print str(self.stream.id), 'in cotinue 1', peer, peer.s print neighbours continue", "== 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else:", "self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing", "pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks", "self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\"", "requests: if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return", "self.controlPipe) def shift(self, norequests = False): n = self.overlay.getNeighbours() outID = self.buffer.shift() if", "= [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler')", "def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "#print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks,", "with 
less sources block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the", "if block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0", "you may not use this file except in compliance with the License. #", "\"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0 for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if", "requestableBlocks[b] #while There are blocks to request while len(requestableBlocks) > 0: #get the", "> 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler):", "#return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05),", "self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback)", "[peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {} for k in keys: blocksToRequest[k] =", "cotinue 1', peer, peer.s print neighbours continue if \"buffer\" not in peer.s[self.stream.id]: print", "block with less sources block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get", "len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest class", "1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler): def", "for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self,", "failure): return failure def produceBlock(self): #print 
\"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return", "blocksToRequest = {} for k in keys: blocksToRequest[k] = [] #take out blocks", "from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq from", "<reponame>schristakidis/p2ner<filename>p2ner/components/scheduler/spullclient/spullclient/core.py # -*- coding: utf-8 -*- # Copyright 2012 <NAME>, <NAME> # #", "def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if", "\"GETREQUESTEDBID\" while True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None:", "= self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should", "if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are", "peerWithRequests = 0 for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests >", "tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys()", "self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for", "self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def", "receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif", "= deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def 
sendBlock(self, req): if not req: self.running", "#get the block with less sources block = min([ (len(requestableBlocks[x]),x) for x in", "use this file except in compliance with the License. # You may obtain", "in neighbours: if self.stream.id not in peer.s: print str(self.stream.id), 'in cotinue 1', peer,", "#take out blocks with only 1 source reqBlockList = requestableBlocks.keys() for b in", "> self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours)", "len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd,", "self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer", "getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running = False #print \"STOP SERVING\\n\\n\" return", "False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def", "def errback(self, failure): return failure def produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock)", "= task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return", "return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self):", "shift(self, norequests = False): n = self.overlay.getNeighbours() outID = self.buffer.shift() if not 
norequests:", "buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for b in tempReq: if b in", "from twisted.internet.threads import deferToThread from random import choice, uniform from time import time", "peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is", "in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "{} requestableBlocks = {} for peer in neighbours: if self.stream.id not in peer.s:", "under the License. from twisted.internet import task, reactor from twisted.internet.threads import deferToThread from", "if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks,", "> 0: #get the block with less sources block = min([ (len(requestableBlocks[x]),x) for", "for x in requestableBlocks])[1] #get the peer with min(less possible requests, less requests", "print str(self.stream.id), 'in cotinue 1', peer, peer.s print neighbours continue if \"buffer\" not", "2012 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the", "LPBMessage from messages.retransmitmessage import RetransmitMessage from block import Block EXPIRE_TIME = 0.5 def", "requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest = {} for k in keys:", "return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID)", "peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) 
else:", "= choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return", "2.0 (the \"License\"); # you may not use this file except in compliance", "(blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler", "blocks to request while len(requestableBlocks) > 0: #get the block with less sources", "starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass", "SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block())", "keys = tmpBlocksToRequest.keys() blocksToRequest = {} for k in keys: blocksToRequest[k] = []", "only 1 source reqBlockList = requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b]) ==", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "for k in keys: blocksToRequest[k] = [] #take out blocks with only 1", "sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self,", "the peer with min(less possible requests, less requests so far) peer = min([", "possible requests, less requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in", "self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt']", "keys: blocksToRequest[k] = [] #take out blocks with only 1 source reqBlockList =", "self.running = False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer =", "blocksToRequest peerWithRequests = 0 for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests", "blocks with only 1 source reqBlockList = requestableBlocks.keys() for b in reqBlockList: if", "# # Unless required by applicable law or agreed to in writing, software", "limitations under the License. from twisted.internet import task, reactor from twisted.internet.threads import deferToThread", "self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall", "#print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[]", "express or implied. 
# See the License for the specific language governing permissions", "return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks", "-*- coding: utf-8 -*- # Copyright 2012 <NAME>, <NAME> # # Licensed under", "return None bid, peer = req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self,", "peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self,", "either express or implied. # See the License for the specific language governing", "outID) outdata.addCallback(self.output.write) def isRunning(self): return self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid", "from twisted.internet import task, reactor from twisted.internet.threads import deferToThread from random import choice,", "p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage", "to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests = False): n =", "self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running = False", "Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg import LPBMessage", "reqBlockList = requestableBlocks.keys() for b in reqBlockList: if len(requestableBlocks[b]) == 1: peer =", "\"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for bid", "reactor.callLater(uniform(0,0.05), 
BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB message", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "\"buffer\" not in peer.s[self.stream.id]: print 'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print", "buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block", "self.running = False #print \"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl)", "start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try:", "in keys: blocksToRequest[k] = [] #take out blocks with only 1 source reqBlockList", "import time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import", "p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage", "p2ner.base.Buffer import Buffer from p2ner.base.BufferList import getMostDeprivedReq from messages.buffermessage import BufferMessage from messages.lpbmsg", "from random import choice, uniform from time import time from p2ner.abstract.scheduler import Scheduler", "peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is", "'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer 
#push", "> 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"]", "choice, uniform from time import time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import", "self.stream.id not in peer.s: print str(self.stream.id), 'in cotinue 1', peer, peer.s print neighbours", "the License. # You may obtain a copy of the License at #", "dd(self, receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid)", "return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self,", "else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print", "in peer.s: print str(self.stream.id), 'in cotinue 1', peer, peer.s print neighbours continue if", "= min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the peer with min(less possible", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks = {}", "import LPBMessage from messages.retransmitmessage import RetransmitMessage from block import Block EXPIRE_TIME = 0.5", "peer.s print neighbours continue if \"buffer\" not in peer.s[self.stream.id]: print 'in continue 2'", "peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests", "def sendRequests(self, requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer,", "is distributed on an 
\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest =", "self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests = False): n = self.overlay.getNeighbours() outID =", "Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID)", "peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer)", "= False self.registerMessages() self.loopingCall = task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log)", "\"SENDING BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def", "peer in neighbours: if self.stream.id not in peer.s: print str(self.stream.id), 'in cotinue 1',", "missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks: if", "from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should retransmit to ',peer,block,fragments b={} b['blockid']=block b['fragments']=fragments", "req): if not req: self.running = False return None bid, peer = req", "for bid in missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb:", "if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] = [peer] keys = tmpBlocksToRequest.keys() blocksToRequest", "sendRequests(self, requests): for peer in 
self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe)", "in peer.s[self.stream.id]: print 'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer", "#print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer is", "SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID = choice(bl)", "blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def", "requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe) def", "print 'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq =", "= self.buffer.shift() if not norequests: #send buffers if self.buffer.lpb % self.reqInterval == 0:", "d def sendBlock(self, req): if not req: self.running = False return None bid,", "twisted.internet.threads import deferToThread from random import choice, uniform from time import time from", "peer = req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def", "{} for peer in neighbours: if self.stream.id not in peer.s: print str(self.stream.id), 'in", "= Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def produceBlock(self): #print \"PRODUCEBLOCK\" d =", "#get the peer with min(less possible requests, less requests so far) peer =", "reqBlockList: if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There", "(self.makeRequests, 
self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer,", "peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print 'temp:',tempReq for", "#exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks: if bid", "if self.buffer.lpb % self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(),", "from block import Block EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest = {} for", "with the License. # You may obtain a copy of the License at", "peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) else: peer.s[self.stream.id][\"request\"]=[] def start(self): self.log.info('scheduler is starting')", "None bid, peer = req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid,", "self.loopingCall.running def askFragments(self,bid,fragments,peer): print 'should ask from ',peer,fragments,bid RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe) def retransmit(self,block,fragments,peer): print 'should", "#print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0 for peer in blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1", "bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist,", "missingBlocks: if bid in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "BLOCK\", blockID, peer peer.s[self.stream.id][\"luck\"] = blockID return (blockID, peer) else: 
peer.s[self.stream.id][\"request\"]=[] def start(self):", "makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving", "requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb,", "self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer =", "blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks to request while len(requestableBlocks) > 0:", "= [] #take out blocks with only 1 source reqBlockList = requestableBlocks.keys() for", "deferToThread from random import choice, uniform from time import time from p2ner.abstract.scheduler import", "norequests: #send buffers if self.buffer.lpb % self.reqInterval == 0: d = self.trafficPipe.call(\"getreceiving\", self)", "class SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = []", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "req: self.running = False return None bid, peer = req #self.log.debug('sending block %d", "receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for peer in self.overlay.getNeighbours(): reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "if len(requests[peer]) > 1: blocksToRequest[peer] = [choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest", "block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", 
blocksToRequest peerWithRequests = 0 for", "self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID = choice(bl) peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\",", "d = self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending", "requestableBlocks])[1] #get the peer with min(less possible requests, less requests so far) peer", "= {} for k in keys: blocksToRequest[k] = [] #take out blocks with", "blocksToRequest: if len(blocksToRequest[peer]): peerWithRequests+=1 if peerWithRequests > self.reqInterval: blocksToRequest = requestOne(blocksToRequest) return blocksToRequest", "elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest = {} requestableBlocks =", "block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the peer with min(less", "tmpBlocksToRequest = {} requestableBlocks = {} for peer in neighbours: if self.stream.id not", "requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage())", "peer with min(less possible requests, less requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x)", "in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests", "in compliance with the License. 
# You may obtain a copy of the", "%d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print \"GETREQUESTEDBID\" while True:", "self.buffer, requests.get(peer), peer, self.controlPipe) def sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "min(less possible requests, less requests so far) peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x", "2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] =", "message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe) def shift(self, norequests = False): n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "getRequestedBID(self): #print \"GETREQUESTEDBID\" while True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer", "in receivingBlocks: missingBlocks.remove(bid) elif bid < self.buffer.flpb: missingBlocks.remove(bid) #print 'missing blocks:',missingBlocks tmpBlocksToRequest =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "and # limitations under the License. 
from twisted.internet import task, reactor from twisted.internet.threads", "#push block to output outdata = self.trafficPipe.call(\"popblockdata\", self, outID) outdata.addCallback(self.output.write) def isRunning(self): return", "def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self,", "See the License for the specific language governing permissions and # limitations under", "True: #print self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running =", "def start(self): self.log.info('scheduler is starting') self.loopingCall.start(self.frequency) def stop(self): self.log.info('scheduler is stopping') #reactor.callLater(0, self.stream.stop)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks,", "from messages.retransmitmessage import RetransmitMessage from block import Block EXPIRE_TIME = 0.5 def requestOne(requests):", "missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude receiving def dd(self,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq = buffer.bIDListCompTrue(missingBlocks) tmpBlocksToRequest[peer] = tempReq #print", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "block import Block EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest = {} for peer", "BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to output", "len(bl) > 0: blockID = choice(bl) 
peer.s[self.stream.id][\"request\"].remove(blockID) peer.s[self.stream.id][\"buffer\"].update(blockID) #print \"SENDING BLOCK\", blockID, peer", "= {} requestableBlocks = {} for peer in neighbours: if self.stream.id not in", "import BufferMessage from messages.lpbmsg import LPBMessage from messages.retransmitmessage import RetransmitMessage from block import", "peer.s[self.stream.id]: print 'in continue 2' continue buffer = peer.s[self.stream.id][\"buffer\"] #print 'neigh buffer:',buffer tempReq", "d.addErrback(self.errback) else: #print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer)", "in requestableBlocks])[1] #get the peer with min(less possible requests, less requests so far)", "self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to output outdata = self.trafficPipe.call(\"popblockdata\", self, outID)", "peer, peer.s print neighbours continue if \"buffer\" not in peer.s[self.stream.id]: print 'in continue", "n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to output outdata = self.trafficPipe.call(\"popblockdata\", self,", "= requestOne(blocksToRequest) return blocksToRequest return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks,", "#print 'temp:',tempReq for b in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b]", "= self.trafficPipe.call(\"getreceiving\", self) d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n) d.addCallback(self.sendRequests) d.addErrback(self.errback) else: #print 'sending buffer'", "reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer #push block to", "import RetransmitMessage from block import Block EXPIRE_TIME = 0.5 def 
requestOne(requests): blocksToRequest =", "0: #get the block with less sources block = min([ (len(requestableBlocks[x]),x) for x", "del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0 for peer in blocksToRequest:", "#print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self, req): if", "d.addCallback(self.sendBlock) d.addErrback(self.errback) return d def sendBlock(self, req): if not req: self.running = False", "Block EXPIRE_TIME = 0.5 def requestOne(requests): blocksToRequest = {} for peer in requests:", "# Copyright 2012 <NAME>, <NAME> # # Licensed under the Apache License, Version", "'temp:',tempReq for b in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer) else: requestableBlocks[b] =", "Version 2.0 (the \"License\"); # you may not use this file except in", "for the specific language governing permissions and # limitations under the License. from", "except in compliance with the License. # You may obtain a copy of", "False): n = self.overlay.getNeighbours() outID = self.buffer.shift() if not norequests: #send buffers if", "= 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def produceBlock(self): #print", "self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure def produceBlock(self): #print \"PRODUCEBLOCK\" d", "print neighbours continue if \"buffer\" not in peer.s[self.stream.id]: print 'in continue 2' continue", "#print 'sending buffer' reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe) #self.log.debug('%s',self.buffer) #print self.buffer", "language governing permissions and # limitations under the License. 
from twisted.internet import task,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "is None: self.running = False #print \"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"])", "= [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages() self.loopingCall =", "d.addErrback(self.errback) return d def sendBlock(self, req): if not req: self.running = False return", "b in reqBlockList: if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b]", "in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block) #print \"BLOCKSTOREQUESTSSSS\", blocksToRequest peerWithRequests = 0 for peer", "REQUESTS\" #print missingBlocks #exclude receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for bid in", "requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks to request while len(requestableBlocks) >", "self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self): self.log.info('initing scheduler') self.running = False self.registerMessages()", "req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\", self, bid, peer) def getRequestedBID(self): #print", "def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print 'neighbours:',neighbours #print \"COMPUTING REQUESTS\" #print missingBlocks #exclude", "receiving def dd(self, receivingBlocks, missingBlocks, neighbours): for bid in missingBlocks: if bid in", "min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if 
block in tmpBlocksToRequest[x]])[1] del requestableBlocks[block] blocksToRequest[peer].append(block)", "# limitations under the License. from twisted.internet import task, reactor from twisted.internet.threads import", "task.LoopingCall(self.shift) self.reqInterval=self.stream.scheduler['reqInt'] self.frequency = 1.0/self.stream.scheduler['blocksec'] self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log) def errback(self, failure): return failure", "peer is None: self.running = False #print \"STOP SERVING\\n\\n\" return None bl =", "(len(requestableBlocks[x]),x) for x in requestableBlocks])[1] #get the peer with min(less possible requests, less", "= tempReq #print 'temp:',tempReq for b in tempReq: if b in requestableBlocks: requestableBlocks[b].append(peer)", "coding: utf-8 -*- # Copyright 2012 <NAME>, <NAME> # # Licensed under the", "1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks to request", "stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours): #print", "False return None bid, peer = req #self.log.debug('sending block %d to %s',bid,peer) self.trafficPipe.call(\"sendblock\",", "\"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if len(bl) > 0: blockID =", "self.overlay.getNeighbours() outID = self.buffer.shift() if not norequests: #send buffers if self.buffer.lpb % self.reqInterval", "in reqBlockList: if len(requestableBlocks[b]) == 1: peer = requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while", "peer, self.controlPipe) def shift(self, norequests = False): n = self.overlay.getNeighbours() outID = self.buffer.shift()", "None: self.running = False #print \"STOP SERVING\\n\\n\" return None bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id][\"request\"]) if", "self.controlPipe) 
def sendLPB(self, peer): self.log.warning('sending LPB message to %s',peer) LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe)", "receivingBlocks, missingBlocks, neighbours) #return dd(self, receivingBlocks, missingBlocks, neighbours) def sendRequests(self, requests): for peer", "if self.stream.id not in peer.s: print str(self.stream.id), 'in cotinue 1', peer, peer.s print", "continue if \"buffer\" not in peer.s[self.stream.id]: print 'in continue 2' continue buffer =", "is stopping') #reactor.callLater(0, self.stream.stop) try: self.loopingCall.stop() except: pass def makeRequests(self, receivingBlocks, missingBlocks, neighbours):", "= tmpBlocksToRequest.keys() blocksToRequest = {} for k in keys: blocksToRequest[k] = [] #take", "time import time from p2ner.abstract.scheduler import Scheduler from p2ner.base.Buffer import Buffer from p2ner.base.BufferList", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "return failure def produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback) return d", "= requestableBlocks[b][0] blocksToRequest[peer].append(b) del requestableBlocks[b] #while There are blocks to request while len(requestableBlocks)", "self.bufferlist peer = getMostDeprivedReq(self.bufferlist, self.buffer) if peer is None: self.running = False #print", "registerMessages(self): self.messages = [] self.messages.append(BufferMessage()) self.messages.append(LPBMessage()) self.messages.append(RetransmitMessage()) self.blocks = [] self.blocks.append(Block()) def initScheduler(self):", "errback(self, failure): return failure def produceBlock(self): #print \"PRODUCEBLOCK\" d = deferToThread(self.getRequestedBID) d.addCallback(self.sendBlock) d.addErrback(self.errback)", "neighbours continue if \"buffer\" not in peer.s[self.stream.id]: print 'in continue 2' continue buffer", "def sendBlock(self, req): if not req: self.running = False 
return None bid, peer", "[choice(requests[peer])] else: blocksToRequest[peer] = requests[peer] return blocksToRequest class SPullClient(Scheduler): def registerMessages(self): self.messages =" ]
[ "from pyramid.testing import DummyRequest from pyramid.testing import testConfig def test_add_formatter() -> None: \"\"\"Test", "formatter.\"\"\" with testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x)", "config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\",", "test_add_formatter() -> None: \"\"\"Test registration of a custom formatter.\"\"\" with testConfig() as config:", "pyramid.testing import testConfig def test_add_formatter() -> None: \"\"\"Test registration of a custom formatter.\"\"\"", "-> None: \"\"\"Test registration of a custom formatter.\"\"\" with testConfig() as config: request", "testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter =", "a custom formatter.\"\"\" with testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda", "as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get(", "DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\", None ) assert", "= DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\", None )", "registering custom formatters.\"\"\" from pyramid.testing import DummyRequest from pyramid.testing 
import testConfig def test_add_formatter()", "None: \"\"\"Test registration of a custom formatter.\"\"\" with testConfig() as config: request =", "import testConfig def test_add_formatter() -> None: \"\"\"Test registration of a custom formatter.\"\"\" with", "\"\"\"Test registration of a custom formatter.\"\"\" with testConfig() as config: request = DummyRequest()", "config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\", None ) assert formatter(\"foo\") ==", "with testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter", "pyramid.testing import DummyRequest from pyramid.testing import testConfig def test_add_formatter() -> None: \"\"\"Test registration", "of a custom formatter.\"\"\" with testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\",", "testConfig def test_add_formatter() -> None: \"\"\"Test registration of a custom formatter.\"\"\" with testConfig()", "\"\"\"Tests for registering custom formatters.\"\"\" from pyramid.testing import DummyRequest from pyramid.testing import testConfig", "for registering custom formatters.\"\"\" from pyramid.testing import DummyRequest from pyramid.testing import testConfig def", "def test_add_formatter() -> None: \"\"\"Test registration of a custom formatter.\"\"\" with testConfig() as", "lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\", None ) assert formatter(\"foo\") == \"foo\"", "custom formatters.\"\"\" from pyramid.testing import DummyRequest from pyramid.testing import testConfig def test_add_formatter() ->", "formatters.\"\"\" from pyramid.testing import DummyRequest from pyramid.testing import testConfig def test_add_formatter() -> None:", "request = DummyRequest() 
config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\", None", "config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x: x) formatter = request.registry.settings[\"pyramid_openapi3_formatters\"].get( \"foormatter\", None ) assert formatter(\"foo\")", "custom formatter.\"\"\" with testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\") config.pyramid_openapi3_add_formatter(\"foormatter\", lambda x:", "import DummyRequest from pyramid.testing import testConfig def test_add_formatter() -> None: \"\"\"Test registration of", "from pyramid.testing import testConfig def test_add_formatter() -> None: \"\"\"Test registration of a custom", "registration of a custom formatter.\"\"\" with testConfig() as config: request = DummyRequest() config.include(\"pyramid_openapi3\")", "DummyRequest from pyramid.testing import testConfig def test_add_formatter() -> None: \"\"\"Test registration of a" ]
[ "class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): # TODO(grepjohnson): Why are there", "TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow()", "python \"\"\"Test USCG specific 8:367:22 area notice message Version 23 samples.\"\"\" import datetime", "self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type,", "self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): # TODO(grepjohnson):", "now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this", "self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min)", "#!/usr/bin/env python \"\"\"Test USCG specific 8:367:22 area notice message Version 23 samples.\"\"\" import", "TODO(grepjohnson): Why are there two messages? 
aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an", "import AreaNotice # from m366_22 import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle #", "self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute)", "from ais_area_notice import m366_22 # from m366_22 import AreaNotice # from m366_22 import", "aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # self.assertEqual(an., ) if", "from m366_22 import AreaNoticeRectangle # from m366_22 import AreaNoticeSector # from m366_22 import", "@unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): # TODO(grepjohnson): Why are there two messages?", "are there two messages? 
aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm])", "# from m366_22 import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle # from m366_22", "#'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # self.assertEqual(an., ) if __name__ == '__main__': unittest.main()", "import AreaNoticeText # from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice)", "now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id)", "Fix this failure.') def testCircle(self): # TODO(grepjohnson): Why are there two messages? aivdm", "AreaNoticeSector # from m366_22 import AreaNoticePoly # from m366_22 import AreaNoticeText # from", "failure.') def testCircle(self): # TODO(grepjohnson): Why are there two messages? 
aivdm = (", "area_type = 1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type)", "import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1", "an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day)", "import m366_22 # from m366_22 import AreaNotice # from m366_22 import AreaNoticeCircle #", "import AreaNoticeRectangle # from m366_22 import AreaNoticeSector # from m366_22 import AreaNoticePoly #", "# from m366_22 import AreaNoticePoly # from m366_22 import AreaNoticeText # from m366_22", "Why are there two messages? aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an =", "self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): # TODO(grepjohnson): Why", "def testCircle(self): # TODO(grepjohnson): Why are there two messages? aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F'", "# from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self):", "this failure.') def testCircle(self): # TODO(grepjohnson): Why are there two messages? 
aivdm =", "Version 23 samples.\"\"\" import datetime import unittest from ais_area_notice import m366_22 # from", "# from m366_22 import AreaNoticeSector # from m366_22 import AreaNoticePoly # from m366_22", "= 1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year,", "self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second,", "23 samples.\"\"\" import datetime import unittest from ais_area_notice import m366_22 # from m366_22", "area notice message Version 23 samples.\"\"\" import datetime import unittest from ais_area_notice import", "AreaNoticeCircle # from m366_22 import AreaNoticeRectangle # from m366_22 import AreaNoticeSector # from", "= ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # self.assertEqual(an., ) if __name__", "AreaNoticePoly # from m366_22 import AreaNoticeText # from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase):", "1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year)", "\"\"\"Test USCG specific 8:367:22 area notice message Version 23 samples.\"\"\" import datetime import", "m366_22 import AreaNoticeRectangle # from m366_22 import AreaNoticeSector # from m366_22 import AreaNoticePoly", "there two messages? 
aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) #", "from m366_22 import AreaNoticePoly # from m366_22 import AreaNoticeText # from m366_22 import", "# from m366_22 import AreaNoticeRectangle # from m366_22 import AreaNoticeSector # from m366_22", "class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1 now =", "area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0)", "from m366_22 import AreaNoticeSector # from m366_22 import AreaNoticePoly # from m366_22 import", "datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day,", "two messages? 
aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # self.assertEqual(an.,", "def testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas)", "self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): # TODO(grepjohnson): Why are", "AreaNotice # from m366_22 import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle # from", "m366_22 import AreaNoticePoly # from m366_22 import AreaNoticeText # from m366_22 import SHAPES", "AreaNoticeText # from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def", "0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): #", "def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow() an", "m366_22 import AreaNotice # from m366_22 import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle", "samples.\"\"\" import datetime import unittest from ais_area_notice import m366_22 # from m366_22 import", "# from m366_22 import AreaNotice # from m366_22 import AreaNoticeCircle # from m366_22", "USCG specific 8:367:22 area notice message Version 23 samples.\"\"\" import datetime import unittest", "ais_area_notice import m366_22 # from m366_22 import AreaNotice # from m366_22 import AreaNoticeCircle", "'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # 
self.assertEqual(an., ) if __name__ == '__main__':", "message Version 23 samples.\"\"\" import datetime import unittest from ais_area_notice import m366_22 #", "import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle # from m366_22 import AreaNoticeSector #", "from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type", "testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow() an =", "from m366_22 import AreaNoticeText # from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self):", "self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.')", "m366_22 import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle # from m366_22 import AreaNoticeSector", "now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr):", "8:367:22 area notice message Version 23 samples.\"\"\" import datetime import unittest from ais_area_notice", "m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour)", "SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def 
testInitWithAreaType(self): area_type = 1 now", "self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self):", "notice message Version 23 samples.\"\"\" import datetime import unittest from ais_area_notice import m366_22", "testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type,", "self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi)", "now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month,", "# TODO(grepjohnson): Why are there two messages? aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' )", "messages? aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # self.assertEqual(an., )", "testCircle(self): # TODO(grepjohnson): Why are there two messages? 
aivdm = ( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E'", "from m366_22 import AreaNotice # from m366_22 import AreaNoticeCircle # from m366_22 import", "when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute,", "self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase):", "now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def", "datetime import unittest from ais_area_notice import m366_22 # from m366_22 import AreaNotice #", "TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix this failure.') def testCircle(self): # TODO(grepjohnson): Why are there two", "now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class", "import unittest from ais_area_notice import m366_22 # from m366_22 import AreaNotice # from", "m366_22.AreaNotice) def testInitWithAreaType(self): area_type = 1 now = datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now)", "specific 8:367:22 area notice message Version 23 samples.\"\"\" import datetime import unittest from", "m366_22 import AreaNoticeSector # from m366_22 import 
AreaNoticePoly # from m366_22 import AreaNoticeText", "= m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month) self.assertEqual(an.when.day, now.day) self.assertEqual(an.when.hour,", "AreaNoticeRectangle # from m366_22 import AreaNoticeSector # from m366_22 import AreaNoticePoly # from", "import AreaNoticePoly # from m366_22 import AreaNoticeText # from m366_22 import SHAPES class", "( '!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F' #'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E' ) an = m366_22.AreaNotice(nmea_strings=[aivdm]) # self.assertEqual(an., ) if __name__ ==", "m366_22 import AreaNoticeText # from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error,", "# from m366_22 import AreaNoticeText # from m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def", "m366_22 # from m366_22 import AreaNotice # from m366_22 import AreaNoticeCircle # from", "import AreaNoticeSector # from m366_22 import AreaNoticePoly # from m366_22 import AreaNoticeText #", "from m366_22 import AreaNoticeCircle # from m366_22 import AreaNoticeRectangle # from m366_22 import", "unittest from ais_area_notice import m366_22 # from m366_22 import AreaNotice # from m366_22", "import datetime import unittest from ais_area_notice import m366_22 # from m366_22 import AreaNotice", "self.assertEqual(an.when.hour, now.hour) self.assertEqual(an.when.minute, now.minute) self.assertEqual(an.when.second, 0) self.assertIsNone(an.duration_min) self.assertIsNone(an.link_id) self.assertIsNone(an.mmsi) class TestVersion23Samples(unittest.TestCase): @unittest.skip('TODO(schwehr): Fix", "= datetime.datetime.utcnow() an = m366_22.AreaNotice(area_type=area_type, when=now) self.assertFalse(an.areas) self.assertEqual(an.area_type, area_type) 
self.assertEqual(an.when.year, now.year) self.assertEqual(an.when.month, now.month)", "m366_22 import SHAPES class TestAreaNotice(unittest.TestCase): def testEmptyInit(self): self.assertRaises(m366_22.Error, m366_22.AreaNotice) def testInitWithAreaType(self): area_type =" ]
[ "= torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert y", "isinstance(batchsize, int),'Batch size should be 100' # Prepare train and test loaders train_loader", "== Y.size()[0] #Convert y back from one hot encoding Y = torch.argmax(Y,dim=1) print('new", "torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return trainset, testset", "import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split def load_data():", "batch_size = batchsize, shuffle = True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize,", "#Convert y back from one hot encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10])", "X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert X.size()[0]", "# Split data tensors into dev and test sets X_train, X_test, y_train, y_test", "print('X load: ',X.size()) print('Y load: ',Y.size()) # Split data tensors into dev and", "num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle = True, num_workers=2) return train_loader,", "X, Y, test_size = 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ',", "X,Y def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert y back from one", "X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt')", "dev and test sets X_train, X_test, 
y_train, y_test = train_test_split( \\ X, Y,", "pd import numpy as np import torch from torch.utils.data import TensorDataset, DataLoader from", "= torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size()) # Split", "def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should be 100' # Prepare", "from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split def load_data(): #Load data", "Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert", "def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert y back from one hot", "test sets X_train, X_test, y_train, y_test = train_test_split( \\ X, Y, test_size =", "print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt')", "encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size())", "DataLoader from sklearn.model_selection import train_test_split def load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y", "= torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size", "one hot encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y", "from sklearn.model_selection import train_test_split def load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y =", "= TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert 
isinstance(batchsize, int),'Batch", "make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should be 100' # Prepare train", "shuffle = True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle = True,", "train and test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True,", "load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y,", "trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should be 100'", "if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test,", "and test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True, num_workers=2)", "test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle = True, num_workers=2) return train_loader, test_loader", "= train_test_split( \\ X, Y, test_size = 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test:", "Y.size()[0] #Convert y back from one hot encoding Y = torch.argmax(Y,dim=1) print('new Y:", "', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train,", "tensors into dev and test sets X_train, X_test, y_train, y_test = train_test_split( \\", "from one hot encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X 
load: ',X.size())", "data tensors into dev and test sets X_train, X_test, y_train, y_test = train_test_split(", "import train_test_split def load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return", "torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert y back", "data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert", "numpy as np import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import", "<gh_stars>0 import pandas as pd import numpy as np import torch from torch.utils.data", "torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return trainset, testset def", "batchsize=100): assert isinstance(batchsize, int),'Batch size should be 100' # Prepare train and test", "',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset", "torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0]", "Prepare train and test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle =", "y_train, y_test = train_test_split( \\ X, Y, test_size = 0.20, random_state=42) print('X_train: ',", "print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size()) # Split data tensors", "assert X.size()[0] == Y.size()[0] #Convert y back from one hot encoding 
Y =", "TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset, testset, batchsize=100):", "batchsize, shuffle = True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle =", "0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if", "\\ X, Y, test_size = 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train:", "', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt')", "torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split def load_data(): #Load data X", "X_train, X_test, y_train, y_test = train_test_split( \\ X, Y, test_size = 0.20, random_state=42)", "torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return trainset,", "X.size()[0] == Y.size()[0] #Convert y back from one hot encoding Y = torch.argmax(Y,dim=1)", "assert isinstance(batchsize, int),'Batch size should be 100' # Prepare train and test loaders", "def load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def", "hot encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y load:", "',X.size()) print('Y load: ',Y.size()) # Split data tensors into dev and test sets", "',Y.size()) # Split data tensors into dev and test sets X_train, X_test, y_train,", "= batchsize, shuffle = True, num_workers=2) 
test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle", "y back from one hot encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X", "import pandas as pd import numpy as np import torch from torch.utils.data import", "pandas as pd import numpy as np import torch from torch.utils.data import TensorDataset,", "train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset,", "sklearn.model_selection import train_test_split def load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt')", "as np import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split", "',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset =", "save_data=False): assert X.size()[0] == Y.size()[0] #Convert y back from one hot encoding Y", "y_train) testset = TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert", "torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return", "Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size()) #", "100' # Prepare train and test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize,", "should be 100' # Prepare train and test loaders train_loader = 
torch.utils.data.DataLoader(trainset, batch_size", "split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert y back from one hot encoding", "random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data:", "print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset =", "torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size()) # Split data", "y_test = train_test_split( \\ X, Y, test_size = 0.20, random_state=42) print('X_train: ', X_train.size())", "loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True, num_workers=2) test_loader =", "= 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size())", "testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should be 100' # Prepare train and", "Y: ',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size()) # Split data tensors into", "test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True, num_workers=2) test_loader", "into dev and test sets X_train, X_test, y_train, y_test = train_test_split( \\ X,", "TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size", "sets X_train, X_test, y_train, y_test = train_test_split( \\ X, Y, test_size = 0.20,", "trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset,", "print('y_test: 
',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset", "True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle = True, num_workers=2) return", "save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test)", "Split data tensors into dev and test sets X_train, X_test, y_train, y_test =", "size should be 100' # Prepare train and test loaders train_loader = torch.utils.data.DataLoader(trainset,", "return X,Y def split_data(X,Y, save_data=False): assert X.size()[0] == Y.size()[0] #Convert y back from", "be 100' # Prepare train and test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size =", "load: ',X.size()) print('Y load: ',Y.size()) # Split data tensors into dev and test", "return trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should be", "test_size = 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test:", "print('Y load: ',Y.size()) # Split data tensors into dev and test sets X_train,", "y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt') torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt') torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt') torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt') trainset = TensorDataset(X_train, y_train)", "= True, num_workers=2) test_loader = 
torch.utils.data.DataLoader(testset, batch_size = batchsize, shuffle = True, num_workers=2)", "as pd import numpy as np import torch from torch.utils.data import TensorDataset, DataLoader", "torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split def load_data(): #Load", "',Y[:10]) print('X load: ',X.size()) print('Y load: ',Y.size()) # Split data tensors into dev", "and test sets X_train, X_test, y_train, y_test = train_test_split( \\ X, Y, test_size", "load: ',Y.size()) # Split data tensors into dev and test sets X_train, X_test,", "int),'Batch size should be 100' # Prepare train and test loaders train_loader =", "= TensorDataset(X_train, y_train) testset = TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset, testset,", "print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size()) print('y_test: ',y_test.size()) if save_data: torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt')", "import TensorDataset, DataLoader from sklearn.model_selection import train_test_split def load_data(): #Load data X =", "Y, test_size = 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size()) print('y_train: ', y_train.size())", "# Prepare train and test loaders train_loader = torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle", "torch.utils.data.DataLoader(trainset, batch_size = batchsize, shuffle = True, num_workers=2) test_loader = torch.utils.data.DataLoader(testset, batch_size =", "np import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split def", "TensorDataset, DataLoader from sklearn.model_selection import train_test_split def load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt')", "X_test, y_train, y_test = train_test_split( \\ X, Y, test_size = 0.20, random_state=42) print('X_train:", "train_test_split def 
load_data(): #Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y", "= torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False): assert X.size()[0] ==", "train_test_split( \\ X, Y, test_size = 0.20, random_state=42) print('X_train: ', X_train.size()) print('X_test: ',X_test.size())", "import numpy as np import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection", "testset def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should be 100' #", "#Load data X = torch.load('/gscratch/stf/jgershon/tensor_x.pt') Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt') return X,Y def split_data(X,Y, save_data=False):", "back from one hot encoding Y = torch.argmax(Y,dim=1) print('new Y: ',Y[:10]) print('X load:", "testset = TensorDataset(X_test, y_test) return trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize,", "y_test) return trainset, testset def make_data_loader(trainset, testset, batchsize=100): assert isinstance(batchsize, int),'Batch size should" ]
[ "not exist') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create", "assert e.last_error.message.startswith('Could not read JSON: Can not') else: assert True == False def", "source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel':", "resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError", "baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception import BceClientError import nose", "is manual with interver float\"\"\" source = {'key': self.key} capture = { 'mode':", "with interval null\"\"\" source = {'key': '测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10,", "'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "pipeline empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'):", "\"\"\"create thumbnail with pipeline empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t", "time float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1,", "'endTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "5, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as", "manual mode') else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end", 
"self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond')", "resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id)", "\"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" source", "test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name,", "_NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception import BceHttpClientError", "thumbnail mode is manual with interver float\"\"\" source = {'key': self.key} capture =", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False def test_create_thumbnail_interval_float(self): \"\"\"create", "pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in", "pipeline not exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True ==", "multiple chars\"\"\" self.key = 'job_测试_123.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source)", "Can not construct') else: assert True == False def 
test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with", "manual with start time is none\"\"\" source = {'key': self.key} capture = {", "capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "BceClientError import nose from nose import tools from nose.tools import assert_raises from nose.tools", "....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source", "BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else: assert True == False,", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 } resp =", "capture = {'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "\"\"\"create thumbnail with width pixel less than 10\"\"\" source = {'key': self.key} target", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with start", "self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name,", "cannot larger than end time') else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create", "'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def", "test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less than 0\"\"\" source = 
{'key': self.key}", "except Exception as e: print(e.message) succ = False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with", "= {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond':", "# # Copyright 2015 Baidu, Inc. # ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10", "delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source)", "def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source = {'key':", "mode is manual with end time none\"\"\" source = {'key': self.key} capture =", "{'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key", "2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create", "{'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is", 
"'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create", "+ 'createthumb' self.pipeline_name = self.pre self.container = 'mp4' self.capacity = 1 self.key =", "'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create", "'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode", "= { 'mode': 'auto', 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "time, interval or frame number in auto mode') else: assert True == False", "assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\" source", "wait ....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\"", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001,", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp", "test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\" source = {'key': 
'longname12longname12longname12longname12longname12longname12.mp4'} resp =", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel,", "nose from nose import tools from nose.tools import assert_raises from nose.tools import assert_is_none", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual", "15:15:40 \"\"\" import os import sys import unittest import json import time import", "assert e.last_error.message.startswith('Could not read JSON: Can not construct') else: assert True == False", "test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target = {'keyPrefix':", "e.message.startswith('arg \"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\"", "'auto', 'endTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp =", "source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot", "False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with interver float\"\"\" source =", "capture = { 'mode': 'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "nose.tools.assert_is_not_none(resp) 
def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is multiple chars\"\"\" self.key =", "10\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit',", "False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with endtime null\"\"\" source =", "source) except ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should not be None') def", "height pixel more than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "source = {'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as e: assert e.message.startswith('arg", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required in manual", "end time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'endTimeInSecond': 10", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create", "{ 'mode': 'auto', 'endTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name = 
each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name)", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def", "import time import media_config import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'", "def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\" source = {'key': self.key} capture", "self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4", "mode is manual with interval null\"\"\" source = {'key': '测试视频.mp4'} capture = {", "source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail", "= {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try:", "self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create", "finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in", "from baidubce.exception import BceServerError from baidubce.exception import BceClientError import nose from nose import", "__init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name = self.pre self.container", "2015 Baidu, 
Inc. # ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import", "'cannot specify start time, end time, interval or frame number in auto mode')", "== False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\" source = {'key':", "== False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source", "folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create", "False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less than 0\"\"\" source =", "'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def", "self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ =", "exist') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail", "def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp =", "= { 'mode': 'auto', 'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "object: not_exist.mp4 does not exist') else: assert True == False, 'not throw BceServerError'", "mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = 
_NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH)", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try:", "test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not in enum\"\"\" source = {'key': self.key} capture", "or frame number in auto mode') else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self):", "JSON: Can not') else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with", "equal end time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10,", "in manual mode') else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with", "from baidubce.services.media import media_client from baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError from", "= 'job_测试_123.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self):", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp =", "assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with end", "self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is multiple chars\"\"\"", "thumbnail with pipeline not exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except", "'heightInPixel': self.height_in_pixel, } resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail", "less than 0\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond':", "with end time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'endTimeInSecond':", "else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with", "'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "{'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key", "False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source =", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id)", "{'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert", "with interval time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'intervalInSecond':", "'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError", "float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond':", "= self.prefix + 'createthumb' self.pipeline_name = self.pre self.container = 'mp4' 
self.capacity = 1", "else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with", "-1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error,", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot larger than end time')", "1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start", "= self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if", "been deleted') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel,", "source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if isinstance(e.last_error,", "Exception as e: print(e.message) succ = False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\"", "self.prefix + 'createthumb' self.pipeline_name = self.pre self.container = 'mp4' self.capacity = 1 self.key", "'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix", "import sys import unittest import json import time import media_config import re import", "def test_create_thumbnail_heightinpixel_lessthan_10(self): 
\"\"\"create thumbnail with height pixel less than 10\"\"\" source = {'key':", "False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more than 2000\"\"\" source =", "self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is chiness\"\"\" self.key =", "key is multiple chars\"\"\" self.key = 'job_测试_123.mp4' source = {'key': self.key} resp =", "self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def", "self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source", "time, end time, interval or frame number in auto mode') else: assert True", "False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target", "'/' _COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from", "= self.pre self.container = 'mp4' self.capacity = 1 self.key = '10s.mp4' self.key_prefix =", "} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error,", "self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second", "number in auto mode') else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail", "= { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name,", "job with key is multiple chars\"\"\" 
self.key = 'job_测试_123.mp4' source = {'key': self.key}", "\"\"\"create env\"\"\" time.sleep(2) succ = True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create", "'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e:", "source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel':", "def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\" source = {'key': 'not_exist.mp4'} try:", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def test_create_thumbnail_start_time_float(self):", "in auto mode') else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56", "assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key", "== False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\" source = {'key':", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot larger than end time') else:", "capture = { 'mode': 'manual', 
'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp =", "while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED': print('please", "self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create", "is manual with endtime null\"\"\" source = {'key': self.key} capture = { 'mode':", "def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end time\"\"\" source = {'key': self.key}", "BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with", "= { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name,", "{ 'mode': 'auto', 'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp =", "with width pixel less than 10\"\"\" source = {'key': self.key} target = {'keyPrefix':", "True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\" source =", "== False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key}", "'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" 
source = {'key': self.key} with", "thumbnail with pipeline none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError", "{'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key", "else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with", "assert_raises from nose.tools import assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create", "assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\" source", "{'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert", "import BceServerError from baidubce.exception import BceClientError import nose from nose import tools from", "png pic\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy':", "is chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source)", "source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp", "\"\"\"create thumbnail with start time float\"\"\" source = {'key': self.key} capture = {", "-*- coding: utf-8 -*- ######################################################################## # # Copyright 2015 Baidu, Inc. 
# ########################################################################", "Can not construct') else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with", "\"\"\"create thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key}", "null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond':", "with end time none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 10 } resp =", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10}", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in", "in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode", "= { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True ==", "self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp =", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001,", "\"\"\"create thumbnail 
with height pixel more than 2000\"\"\" source = {'key': self.key} target", "and resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name)", "\"\"\"create thumbnail with key prefix is none\"\"\" source = {'key': self.key} resp =", "\"\"\"create thumbnail job with key is chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key':", "self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key': self.key} target = {'keyPrefix':", "source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with end time\"\"\"", "'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key dot\"\"\" source = {'key':", "'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is", "exist') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not')", "tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name =", "'png', 'sizingPolicy': 'shrinkToFit', 
'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, }", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True", "thumbnail with height pixel less than 10\"\"\" source = {'key': self.key} target =", "e.last_error.message.startswith('start time cannot larger than end time') else: assert True == False def", "'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width", "test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name", "\"\"\"create thumbnail mode is manual with interver null\"\"\" source = {'key': self.key} capture", "def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target =", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 }", "try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError):", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required in manual mode') else: assert True", 
"self.width_in_pixel, 'heightInPixel': 5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as", "'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "try: self.client.create_thumbnail_job(None, source) except ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should not be", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5,", "resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e: print(e.message) succ = False", "self.end_time_in_second = 50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\"", "throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\" source = {'key':", "'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "pic\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy,", "\"\"\"create thumbnail with width pixel more than 2000\"\"\" source = {'key': self.key} target", "thumbnail mode is manual with interver null\"\"\" source = {'key': self.key} capture =", "from nose.tools import assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\"", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source =", "'intervalInSecond': 10 } try: resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self):", "e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try:", "self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond':", "less than 0\"\"\" source = {'key': self.key} capture = {'startTimeInSecond': -1} try: resp", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start", "requested pipeline does not exist') else: assert True == False, 'not throw BceServerError'", "assert True == False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with interver", "import BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception import BceClientError import nose from", "5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode", "source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key dot\"\"\" source =", "from 
baidubce.exception import BceClientError import nose from nose import tools from nose.tools import", "assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with end", "BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with", "= {'key': self.key} capture = {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "= {'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as e: assert e.message.startswith('arg \"pipeline_name\"", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not", "test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\" source = {'key': self.key} target =", "= { 'mode': 'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "= {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail", "= {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with", "= True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e: print(e.message)", "self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) 
succ = True try: resp", "can\\'t be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key", "False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\" source", "assert e.message.startswith('arg \"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline", "in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status !=", "from nose import tools from nose.tools import assert_raises from nose.tools import assert_is_none from", "\"\"\"create thumbnail mode is auto with interval time\"\"\" source = {'key': self.key} capture", "True == False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel equal 2000\"\"\" source", "'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "resp.thumbnails: for each_job in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS'", "test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None, source)", "'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "thumbnail with key long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source)", "self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as", "def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\" source = {'key': self.key} capture", "self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\" source =", "True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key':", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with", "BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with", "'mp4' self.capacity = 1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg'", "resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED':", "width pixel less than 10\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as e: assert", "thumbnail mode is manual with endtime null\"\"\" source = {'key': self.key} capture =", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key': self.key}", "'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except", "BceHttpClientError as e: if isinstance(e.last_error, 
BceServerError): assert e.last_error.message.startswith('start time cannot larger than end", "def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp", "'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode,", "job with key is chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key': self.key} resp", "target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True", "'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with interver null\"\"\" source", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start time, end", "source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with interval null\"\"\"", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else:", "#!/usr/bin/env python # -*- coding: utf-8 -*- ######################################################################## # # Copyright 2015 Baidu,", "BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail", "BceServerError): assert e.last_error.message.startswith( 
'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with", "with png pic\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png',", "source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted')", "self.height_in_pixel, } capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second }", "} capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp", "read JSON: Can not construct') else: assert True == False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create", "try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object:", "enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing',", "auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is", "BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not') else: assert True == False", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail", "True == False def 
test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less than 0\"\"\"", "assert e.last_error.message.startswith('start time is required in manual mode') else: assert True == False", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist')", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 }", "'测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name,", "'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\"", "is none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual' } try:", "True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more than 2000\"\"\"", "media_config import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH", "nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in resp.pipelines:", "ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create", "end time float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':", "BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\" source = {'key': self.key}", "BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False def test_create_thumbnail_interval_float(self): \"\"\"create 
thumbnail", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start time, end time, interval or", "self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not", "interval or frame number in auto mode') else: assert True == False def", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True", "source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail", "'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self):", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\"", "source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel':", "\"\"\"create thumbnail with key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name,", "is required in manual mode') else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp", "pixel more than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "\"\"\"create 
thumbnail format not in enum\"\"\" source = {'key': self.key} target = {'keyPrefix':", "\"\"\"create thumbnail normal\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format,", "File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os import sys import unittest import", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp =", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 }", "empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is chiness\"\"\"", "with key prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source)", "source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error,", "source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end time\"\"\" source", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=')", "True == False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with interver float\"\"\"", "test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less than 10\"\"\" source = {'key': 
self.key}", "{'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10", "capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name,", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create", "normal\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy,", "400 self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second = 10", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel,", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not') else: assert True ==", "'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail", "def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less than 10\"\"\" source = {'key':", "'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self):", "does not exist') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self):", "self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails: while(1): resp = 
self.client.get_thumbnail_job(each_job.job_id) if resp.job_status", "'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except", "def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less than 0\"\"\" source = {'key':", "self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, }", "self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key dot\"\"\" source", "source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp =", "def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp", "Can not') else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try:", "= {'key': self.key} capture = {'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "with start time less than 
0\"\"\" source = {'key': self.key} capture = {'startTimeInSecond':", "is manual with interver null\"\"\" source = {'key': self.key} capture = { 'mode':", "import BceClientError import nose from nose import tools from nose.tools import assert_raises from", "False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with interval time\"\"\" source =", "exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if", "is auto with interval time\"\"\" source = {'key': self.key} capture = { 'mode':", "\"\"\"create thumbnail with start time less than 0\"\"\" source = {'key': self.key} capture", "test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source = {'key': self.key}", "0\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond':", "!= 'FAILED': print('please wait ....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self):", "from nose.tools import assert_raises from nose.tools import assert_is_none from nose.tools import raises class", "none\"\"\" source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot larger than end time') else: assert True", "'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e: print(e.message) succ = False finally: nose.tools.assert_true(succ)", "start time is none\"\"\" source = {'key': 
self.key} capture = { 'mode': 'manual'", "import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH +", "\"\"\"create thumbnail with pipeline not exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source)", "assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel", "thumbnail with start time float\"\"\" source = {'key': self.key} capture = { 'mode':", "test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with interval time\"\"\" source = {'key': self.key}", "self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format", "thumbnail mode is auto with end time\"\"\" source = {'key': self.key} capture =", "than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy':", "key prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id)", "than start time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20,", "source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel':", "False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\" source", "read JSON: Can not construct') else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 
'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp", "10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if", "'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail", "thumbnail with png pic\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if", "env\"\"\" time.sleep(2) succ = True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception", "= self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as", "self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if", "target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, }", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000,", "= {'key': self.key} capture = { 'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode", 
"source = {'key': self.key} capture = { 'mode': 'manual' } try: resp =", "def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not in enum\"\"\" source = {'key': self.key}", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel,", "'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long", "mode is manual with endtime null\"\"\" source = {'key': self.key} capture = {", "'mode': 'auto', 'endTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "time is required in manual mode') else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self):", "or frame number in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self):", "source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel", "= { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name = self.pre", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel':", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 
2001, 'heightInPixel': self.height_in_pixel, } try: resp =", "capture = {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "manual with endtime null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "-*- ######################################################################## # # Copyright 2015 Baidu, Inc. # ######################################################################## \"\"\" File: test_create_thumbnail.py", "= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\" source", "+ '/' _COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True", "= {'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create", "{ 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key':", "not in enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png',", "with mode is auto\"\"\" source = {'key': self.key} capture = {'mode': 'auto'} resp", "pixel equal 
2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png',", "'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode", "class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix", "= 400 self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second =", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else:", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp", "test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with interver float\"\"\" source = {'key': self.key}", "\"\"\"create thumbnail start time equal end time\"\"\" source = {'key': self.key} capture =", "self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second", "capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "== False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with end time none\"\"\"", "source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source)", "in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create 
thumbnail with mode not in enum\"\"\" source = {'key':", "is manual with start time is none\"\"\" source = {'key': self.key} capture =", "auto with end time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto',", "os import sys import unittest import json import time import media_config import re", "assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with", "\"\"\"create thumbnail mode is manual with endtime less than start time\"\"\" source =", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond')", "'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create", "= {'key': self.key} capture = { 'mode': 'auto', 'endTimeInSecond': 10 } try: resp", "try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less", "capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) 
nose.tools.assert_is_not_none(resp)", "= { 'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else:", "'mode': 'auto', 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "= _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception import", "True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\" source =", "= each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails:", "self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\" source =", "self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less than", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual", "frame number in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): 
\"\"\"create thumbnail mode is manual with endtime", "\"\"\"create thumbnail mode is manual with end time none\"\"\" source = {'key': self.key}", "nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is none\"\"\" source = {'key':", "def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less than 0\"\"\" source = {'key':", "test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source = {'key': self.key}", "= { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "= {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail", "'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with", "= self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\"", "mode is manual with start time is none\"\"\" source = {'key': self.key} capture", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): 
assert e.last_error.message.startswith('start time", "has been deleted') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self):", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not in enum\"\"\"", "with height pixel less than 10\"\"\" source = {'key': self.key} target = {'keyPrefix':", "long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self):", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in", "not read JSON: Can not construct') else: assert True == False def test_create_thumbnail_widthinpixel_equal_2000(self):", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else: assert True == False,", "== False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\"", "source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel':", "'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e:", "end time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10,", "source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def 
test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail", "as e: print(e.message) succ = False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2)", "= 'test--*--中文.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self):", "is manual with interval null\"\"\" source = {'key': '测试视频.mp4'} capture = { 'mode':", "than 0\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 0,", "== False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel more than 2000\"\"\" source", "self.key} capture = { 'mode': 'auto', 'endTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "\"\"\"create thumbnail with width pixel equal 2000\"\"\" source = {'key': self.key} target =", "\"\"\"create thumbnail with pipeline none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None, source) except", "is manual with endtime less than start time\"\"\" source = {'key': self.key} capture", "source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source =", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True ==", "than end time') else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode", "source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert", "self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def 
test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key':", "True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with endtime null\"\"\"", "pic\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit',", "with endtime null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100,", "640 self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second = 50", "= 640 self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second =", "with pipeline none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as", "'../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception import BceHttpClientError from baidubce.exception", "\"\"\" import os import sys import unittest import json import time import media_config", "exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if", "self.pre self.container = 'mp4' self.capacity = 1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput'", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False", "== False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with interver float\"\"\" source", "{'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key", "'capture.intervalInSecond:capture.intervalInSecond') else: assert True == 
False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual", "'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source = {'key':", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def", "assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with endtime", "frame number in auto mode') else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create", "capture = { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp =", "self.key = 'job_测试_123.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def", "in enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy':", "construct') else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\"", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source", "== False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\"", "= 50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2)", "dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create", 
"BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'}", "each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails:", "start time less than 0\"\"\" source = {'key': self.key} capture = {'startTimeInSecond': -1}", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel less than 10\"\"\" source =", "time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond':", "10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ = True try:", "'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "\"\"\"create thumbnail with key not exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source)", "with end time less than 0\"\"\" source = {'key': self.key} capture = {", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond')", "'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def", "'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "assert 
e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height", "def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel more than 2000\"\"\" source = {'key':", "'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else:", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert", "source = {'key': self.key} capture = { 'mode': 'auto', 'endTimeInSecond': 10 } try:", "assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source =", "def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with end time none\"\"\" source =", "assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less than", "frame number in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create", "= { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not') else: assert True", "def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None,", "= 'jpg' self.sizing_policy = 
'keep' self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode =", "False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\" source = {'key': self.key}", "mode') else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time", "with width pixel more than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix':", "= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\" source", "import os import sys import unittest import json import time import media_config import", "time is none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual' }", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not') else: assert", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key': self.key}", "thumbnail with width pixel more than 2000\"\"\" source = {'key': self.key} target =", "capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with interval null\"\"\" source", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with", "thumbnail with mode not in enum\"\"\" source = {'key': self.key} capture = {'mode':", "e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start", "mode is manual with interver null\"\"\" source = {'key': self.key} capture = {", 
"except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can", "'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with interval", "def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target =", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot larger", "pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "nose.tools import assert_raises from nose.tools import assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase):", "{'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if", "\"\"\"create thumbnail mode is manual with start time is none\"\"\" source = {'key':", "create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name", "time less than 0\"\"\" source = {'key': self.key} capture = { 'mode': 'auto',", "manual with interver float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep'", "'intervalInSecond': 10} resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode", "{'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if", "def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with interver float\"\"\" source = {'key':", "= {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with", "self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix", "{'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png", "else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less", "less than start time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "else: assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\"", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, }", "source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error,", "= { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "end time\"\"\" source = 
{'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 10", "nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job", "each_job in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status", "source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, }", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete", "{'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id)", "resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5) else: break", "Baidu, Inc. 
# ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "thumbnail job with key is chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key': self.key}", "'/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel = 640 self.height_in_pixel = 400", "= 0 self.end_time_in_second = 50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def setUp(self):", "{'key': self.key} capture = { 'mode': 'auto', 'intervalInSecond': 10 } try: resp =", "test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\" source = {'key': self.key} capture =", "test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is multiple chars\"\"\" self.key = 'job_测试_123.mp4' source", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError):", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert", "BceServerError): assert e.last_error.message.startswith('start time cannot larger than end time') else: assert True ==", "test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less than 0\"\"\" source = 
{'key': self.key}", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 10 } resp", "with key not exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError", "= {'key': self.key} capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "with interver null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1,", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start time, end time, interval or frame", "True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel':", "\"\"\"create thumbnail with key long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name,", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel,", "'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail", "start time, end time, interval or frame number in auto mode') else: assert", "def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key': self.key} target", "mode is auto\"\"\" source = {'key': self.key} capture = {'mode': 'auto'} resp =", "from baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception import 
BceClientError import", "= {'key': self.key} capture = { 'mode': 'auto', 'intervalInSecond': 10 } try: resp", "= {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError):", "{'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self):", "in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for", "target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON:", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required", "nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'}", "else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png pic\"\"\" source", "\"\"\"create thumbnail job with key is multiple chars\"\"\" self.key = 'job_测试_123.mp4' source =", "5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if", "True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel more than 2000\"\"\"", "'widthInPixel': self.width_in_pixel, 'heightInPixel': 
self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self):", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50,", "'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self):", "else: assert True == False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel equal", "float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond':", "= { 'mode': 'auto', 'endTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "{'key': self.key} capture = {'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "end time, interval or frame number in auto mode') else: assert True ==", "source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\" source = {'key':", "BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight", "time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source =", "self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode, 'startTimeInSecond':", "assert True == False def test_create_thumbnail_widthinpixel_equal_2000(self): 
\"\"\"create thumbnail with width pixel equal 2000\"\"\"", "import assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self):", "test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel more than 2000\"\"\" source = {'key': self.key}", "{'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp", "null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5", "self.targetBucket) except Exception as e: print(e.message) succ = False finally: nose.tools.assert_true(succ) def tearDown(self):", "thumbnail with end time less than 0\"\"\" source = {'key': self.key} capture =", "with end time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond':", "source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is multiple chars\"\"\" self.key", "else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\"", "with key is chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key': self.key} resp =", "nose.tools import assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start time, end time,", "{ 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit',", "capture = { 'mode': 'auto', 
'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "width pixel more than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "'FAILED': print('please wait ....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create", "time.sleep(2) succ = True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as", "= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is multiple", "key not exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as", "sys import unittest import json import time import media_config import re import mediaBase", "source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert", "time import media_config import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH", "BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert True == False,", "\"\"\"create thumbnail with key prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp =", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target", "capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end time\"\"\" source =", "= self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if 
(pipeline_name.startswith(self.pre)): resp =", "for each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if", "enum\"\"\" source = {'key': self.key} capture = {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "\"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os import sys import unittest", "time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 10 }", "def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'}", "thumbnail start time equal end time\"\"\" source = {'key': self.key} capture = {", "should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" source =", "== False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail with end time less than 0\"\"\" source", "with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name,", "target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel less than 10\"\"\" source", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, }", "else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less", "assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more than", "'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) 
except BceHttpClientError as e:", "{'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with interver null\"\"\" source = {'key':", "endtime null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond':", "null\"\"\" source = {'key': '测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try: resp", "= {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source) def", "pixel less than 10\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with interver null\"\"\" source =", "else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel,", "self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': 
self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel,", "= self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key': self.key} target =", "mode is auto with interval time\"\"\" source = {'key': self.key} capture = {", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not') else:", "'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except", "{ 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self):", "self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel", "import assert_raises from nose.tools import assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test", "'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture)", "self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline", "self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The", 
"######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os import sys import", "else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with", "time cannot larger than end time') else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self):", "resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\"", "import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0,", "self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e:", "{'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True ==", "self.key} capture = { 'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id)", "with start time float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "is multiple chars\"\"\" self.key = 'job_测试_123.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name,", "def 
test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with interval time\"\"\" source = {'key':", "mode is manual with endtime less than start time\"\"\" source = {'key': self.key}", "time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'intervalInSecond': 10 }", "= {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture", "'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self):", "test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline',", "{ 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "time float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1.25,", "test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os import sys import unittest import json", "+ '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception import BceHttpClientError from", "thumbnail with key prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name,", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10}", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } 
resp =", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel,", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert", "end time none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith(", "False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less than 0\"\"\" source =", "capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp =", "False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\" source = {'key': self.key}", "target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name)", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def", "'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except", "False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\" source = {'key': self.key}", "time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'endTimeInSecond': 10 }", "self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, target) except", "'test--*--中文.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create", "= '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel =", "self.key} capture = { 'mode': 'auto', 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "\"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name = self.pre self.container =", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_mode_is_auto(self):", "{'key': self.key} capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def", "test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual with endtime less than start time\"\"\" source", "prefix is none\"\"\" source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def", "source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1,", "self.pipeline_name = self.pre self.container = 'mp4' self.capacity = 1 self.key = '10s.mp4' self.key_prefix", "'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def", "JSON: Can not construct') else: assert True == False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail", "self.key} target = 
{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel,", "larger than end time') else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail", "'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError", "'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "not exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e:", "time equal end time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "self.key} capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self):", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual with", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=')", "self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode", "'manual' self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config)", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name,", "source, 
capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else:", "assert e.last_error.message.startswith('pipeline has been deleted') else: assert True == False, 'not throw BceServerError'", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert True ==", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name,", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name,", "'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second,", "be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is", "BceServerError from baidubce.exception import BceClientError import nose from nose import tools from nose.tools", "chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp)", "thumbnail mode is manual with endtime less than start time\"\"\" source = {'key':", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try:", "else: assert True == False, 'not throw BceServerError' def 
test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with", "== False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\" source = {'key':", "deleted') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail", "capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with start time is", "True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\" source =", "with endtime less than start time\"\"\" source = {'key': self.key} capture = {", "source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel less than 10\"\"\"", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name,", "assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel more than", "time none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 10", "'job_测试_123.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create", "auto\"\"\" source = {'key': self.key} capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, 
source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self):", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp", "'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id)", "_NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with start time is none\"\"\"", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel,", "format not in enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with end time none\"\"\" source = {'key':", "# -*- coding: utf-8 -*- ######################################################################## # # Copyright 2015 Baidu, Inc. 
#", "e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is", "'jpg' self.sizing_policy = 'keep' self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode = 'manual'", "self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond':", "manual with interver null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not", "e.last_error.message.startswith('start time is required in manual mode') else: assert True == False def", "key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self):", "key long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def", "resp.pipelines: pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job", "thumbnail with width pixel less than 10\"\"\" source = {'key': self.key} target =", "capture = { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try:", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel':", "construct') else: assert True == False def 
test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel", "prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def", "source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source =", "assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline", "test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key': self.key} target =", "interver float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50,", "thumbnail format not in enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else: assert True == False, 'not throw", "width pixel equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False def", "re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH + '../../../'", "self.pre = self.prefix + 'createthumb' self.pipeline_name = self.pre self.container = 'mp4' self.capacity =", "'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "start time equal end time\"\"\" source = {'key': self.key} capture = { 'mode':", "\"\"\"create thumbnail with mode not in enum\"\"\" source = {'key': self.key} capture =", "'endTimeInSecond': 20 } resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create", "'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel':", "mode is manual with interver float\"\"\" source = {'key': self.key} capture = {", "assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\" source", "0 self.end_time_in_second = 50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create", "baidubce.services.media import media_client from baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self):", "or frame number in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self):", "= {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError):", "2001, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as", "read JSON: Can not') else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail", "with key prefix is none\"\"\" source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, 
source)", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel': self.height_in_pixel, }", "== False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with interval time\"\"\" source", "'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time", "throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source = {'key': self.key}", "def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source = {'key':", "resp = self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp", "= {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with", "time less than 0\"\"\" source = {'key': self.key} capture = {'startTimeInSecond': -1} try:", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, }", "time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond':", "20 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail", "number in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail", "\"\"\"create thumbnail with mode is auto\"\"\" source = 
{'key': self.key} capture = {'mode':", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail", "test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def", "interval time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto', 'intervalInSecond': 10", "import media_client from baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception import", "thumbnail with key prefix is none\"\"\" source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name,", "test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp =", "self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 }", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON:", "= {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create", "= '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel = 640 
self.height_in_pixel =", "auto with interval time\"\"\" source = {'key': self.key} capture = { 'mode': 'auto',", "= False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for", "thumbnail mode is manual with end time none\"\"\" source = {'key': self.key} capture", "self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail", "self.client.create_thumbnail_job(None, source) except ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should not be None')", "def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\" source = {'key': self.key} capture", "'format': 'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "'heightInPixel': 5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e:", "with start time is none\"\"\" source = {'key': self.key} capture = { 'mode':", "self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5)", "source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual with endtime less", "test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end time\"\"\" source = {'key': self.key} capture", "with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail", 
"setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ = True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket)", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not in", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp =", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else: assert True ==", "== False def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\" source = {'key':", "self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is none\"\"\" source", "capture = { 'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "python # -*- coding: utf-8 -*- ######################################################################## # # Copyright 2015 Baidu, Inc.", "BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception import BceClientError import nose from nose", "sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception import BceHttpClientError from baidubce.exception import", "mode') else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual", "= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source =", "endtime less than start time\"\"\" source = {'key': 
self.key} capture = { 'mode':", "import media_config import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH =", "with pipeline not exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError", "include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self):", "import unittest import json import time import media_config import re import mediaBase _NOW_PATH", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5, 'heightInPixel':", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist')", "not construct') else: assert True == False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width", "assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else: assert True == False, 'not", "throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\" source = {'key':", "less than 10\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png',", "= 'manual' self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second = 10 self.client =", "self.key} capture = {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 10 }", "def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with start time is none\"\"\" source", "'mode': 'manual', 'startTimeInSecond':10, 
'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id)", "self.container = 'mp4' self.capacity = 1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format", "nose.tools.assert_is_not_none(resp) source = {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if", "baidubce.exception import BceClientError import nose from nose import tools from nose.tools import assert_raises", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create", "'createthumb' self.pipeline_name = self.pre self.container = 'mp4' self.capacity = 1 self.key = '10s.mp4'", "not read JSON: Can not construct') else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self):", "with key is multiple chars\"\"\" self.key = 'job_测试_123.mp4' source = {'key': self.key} resp", "else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with", "0\"\"\" source = {'key': self.key} capture = {'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source =", "if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5) else:", "'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "if resp.thumbnails: for each_job in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status !=", "is manual with end 
time none\"\"\" source = {'key': self.key} capture = {", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal", "def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\" source = {'key': self.key} try:", "'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time", "= 1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy =", "capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with end time\"\"\" source", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert", "\"\"\"create thumbnail with end time float\"\"\" source = {'key': self.key} capture = {", "source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with start time", "else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with", "than 0\"\"\" source = {'key': self.key} capture = {'startTimeInSecond': -1} try: resp =", "BceServerError): assert e.last_error.message.startswith('start time is required in manual mode') else: assert True ==", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less than 10\"\"\" source =", "not exist') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create", "{'keyPrefix': 
self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp =", "False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline", "thumbnail mode is manual with start time is none\"\"\" source = {'key': self.key}", "def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ = True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket,", "source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with interver", "test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is chiness\"\"\" self.key = 'test--*--中文.mp4' source =", "os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import", "'SUCCESS' and resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5) else: break resp =", "self.width_in_pixel, 'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "mode') else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto", "thumbnail mode is manual with interval null\"\"\" source = {'key': '测试视频.mp4'} capture =", "= {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 5,", "nose.tools.assert_is_not_none(resp.job_id) def 
test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual with endtime less than start", "thumbnail with height pixel more than 2000\"\"\" source = {'key': self.key} target =", "assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less than", "than 10\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy':", "self.capacity = 1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy", "self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not", "source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read", "in enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy':", "test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel equal 2000\"\"\" source = {'key': self.key} target", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with interval", "string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is chiness\"\"\" self.key", "self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else:", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 
'heightInPixel': self.height_in_pixel,", "'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else: assert", "def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel equal 2000\"\"\" source = {'key': self.key}", "source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is chiness\"\"\" self.key = 'test--*--中文.mp4'", "'startTimeInSecond':10, 'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self):", "time') else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual", "'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos", "e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel", "True == False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include", "= {'key': self.key} try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError):", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not construct') else: assert", "e: if isinstance(e.last_error, BceServerError): assert 
e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_equal_2000(self):", "test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with interval null\"\"\" source = {'key': '测试视频.mp4'}", "self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else:", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp", "capture = { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp =", "== False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more than 2000\"\"\" source", "self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel less than", "# ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os import sys", "'widthInPixel': 5, 'heightInPixel': self.height_in_pixel, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def test_create_thumbnail_end_time_float(self):", "def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key': self.key} target", "number in auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail", "Inc. 
# ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\" import os import", "thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key': self.key} try:", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000,", "e: assert e.message.startswith('arg \"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with", "True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with end time", "= self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else: assert True", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else: assert True", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not construct')", "== False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot larger than", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try:", "= {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def 
test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with", "{ 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "media_client from baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError from baidubce.exception import BceClientError", "mode') else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto", "thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name =", "e.last_error.message.startswith('pipeline has been deleted') else: assert True == False, 'not throw BceServerError' def", "self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp", "'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not in enum\"\"\" source =", "assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else: assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with", "self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name,", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) 
def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto with end", "thumbnail job with key is multiple chars\"\"\" self.key = 'job_测试_123.mp4' source = {'key':", "2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required in", "2015/06/10 15:15:40 \"\"\" import os import sys import unittest import json import time", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start time, end time, interval", "resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is", "BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\" source = {'key': self.key} try:", "else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel more", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel':", "-1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if", "in enum\"\"\" source = {'key': self.key} capture = {'mode': 'notmode'} try: resp =", "'10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel = 640", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp", "import json import time import media_config import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__))", "10} resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is", "self.client.create_thumbnail_job(self.pipeline_name, source, target, capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create", "equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy':", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2001, 'heightInPixel':", "not read JSON: Can not') else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create", "'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create", "each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails: while(1):", "not construct') else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail with png", "empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('',", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start", "if 
(pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails: while(1): resp", "def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual with endtime less than start time\"\"\"", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False", "!= 'SUCCESS' and resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5) else: break resp", "source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does", "source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less than 10\"\"\"", "} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error,", "test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source = {'key':", "target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True", "mode not in enum\"\"\" source = {'key': self.key} capture = {'mode': 'notmode'} try:", "{'key': self.key} capture = {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail with width pixel less than 10\"\"\" source", "capture = { 'mode': 'auto', 'endTimeInSecond': 10 } try: resp = 
self.client.create_thumbnail_job(self.pipeline_name, source,", "'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self): \"\"\"create thumbnail with key include folder\"\"\" source =", "baidubce.exception import BceServerError from baidubce.exception import BceClientError import nose from nose import tools", "try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e: print(e.message) succ =", "BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not construct') else: assert True ==", "\"\"\"create thumbnail mode is manual with interval null\"\"\" source = {'key': '测试视频.mp4'} capture", "\"\"\"create thumbnail with end time less than 0\"\"\" source = {'key': self.key} capture", "capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with interver null\"\"\"", "self.key = 'test--*--中文.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def", "assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert True == False, 'not", "assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\" source", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34, 'intervalInSecond':", "capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert", "env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if", "Copyright 2015 Baidu, Inc. 
# ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40 \"\"\"", "source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is none\"\"\" source =", "False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel equal 2000\"\"\" source = {'key':", "== False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less than 0\"\"\" source", "'mode': 'auto', 'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is", "assert_is_none from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction", "resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with key long name\"\"\"", "test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end time float\"\"\" source = {'key': self.key} capture =", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time cannot larger than end time') else: assert", "'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if", "2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with", "== False def 
test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with endtime null\"\"\" source", "def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError,", "with key long name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp)", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id)", "\"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb'", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True ==", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False", "True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e: print(e.message) succ", "not') else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time", "'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, 
capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail", "resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key': self.key} target", "as e: assert e.message.startswith('arg \"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail", "e.last_error.message.startswith('The requested pipeline does not exist') else: assert True == False, 'not throw", "= 'mp4' self.capacity = 1 self.key = '10s.mp4' self.key_prefix = '/00mingxioutput' self.target_format =", "def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create thumbnail job with key is chiness\"\"\" self.key = 'test--*--中文.mp4' source", "nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self)", "= {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 10 } try: resp", "'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "start time\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10,", "False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is manual with end time none\"\"\" source", "= {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_long_name(self): \"\"\"create thumbnail with", "test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel less than 10\"\"\" source = {'key': self.key}", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, 
target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height", "test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with endtime null\"\"\" source = {'key': self.key}", "json import time import media_config import re import mediaBase _NOW_PATH = os.path.dirname(os.path.abspath(__file__)) +", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not construct') else: assert True", "with end time float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual',", "'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is manual with endtime null\"\"\" source = {'key':", "test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is manual with interver null\"\"\" source = {'key': self.key}", "= media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ = True try: resp =", "######################################################################## # # Copyright 2015 Baidu, Inc. 
# ######################################################################## \"\"\" File: test_create_thumbnail.py Date:", "\"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline", "test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key': self.key} target =", "0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def", "time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name if (pipeline_name.startswith(self.pre)):", "'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode", "source = {'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 10 } try:", "TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix +", "float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond':", "mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name = self.pre self.container = 'mp4' 
self.capacity", "thumbnail with pipeline empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be", "'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def", "{'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel':", "{ 'mode': 'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def", "48.34, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail", "thumbnail with end time float\"\"\" source = {'key': self.key} capture = { 'mode':", "for each_job in resp.thumbnails: while(1): resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and", "= self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED': print('please wait ....\\n')", "utf-8 -*- ######################################################################## # # Copyright 2015 Baidu, Inc. 
# ######################################################################## \"\"\" File:", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 }", "assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode", "{'key': '测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp =", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot specify start time,", "# Copyright 2015 Baidu, Inc. # ######################################################################## \"\"\" File: test_create_thumbnail.py Date: 2015/06/10 15:15:40", "resp = self.client.get_thumbnail_job(each_job.job_id) if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED': print('please wait", "_COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception import BceHttpClientError from baidubce.exception import BceServerError", "thumbnail with mode is auto\"\"\" source = {'key': self.key} capture = {'mode': 'auto'}", "unittest import json import time import media_config import re import mediaBase _NOW_PATH =", "e: print(e.message) succ = False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp", "= self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e: print(e.message) succ = False finally:", "50 self.interval_in_second = 10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ", "target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source = {'key':", "'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "= 10 self.client = media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ = True", "\"\"\"create thumbnail with height pixel less than 10\"\"\" source = {'key': self.key} target", "BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else: assert True == False, 'not throw", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'cannot", "= os.path.dirname(os.path.abspath(__file__)) + '/' _COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media", "with mode not in enum\"\"\" source = {'key': self.key} capture = {'mode': 'notmode'}", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert True", "interver null\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50,", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "not_exist.mp4 does not exist') else: assert True == False, 'not throw BceServerError' def", "= {'key': '测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp", "== False def test_create_thumbnail_widthinpixel_equal_2000(self): \"\"\"create thumbnail with width pixel equal 2000\"\"\" source =", "source = {'key': self.key} capture = {'startTimeInSecond': 
-1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "= {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name,", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail", "def test_create_thumbnail_heightinpixel_equal_2000(self): \"\"\"create thumbnail withheight pixel equal 2000\"\"\" source = {'key': self.key} target", "-1, 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key dot\"\"\"", "capture = { 'mode': 'auto', 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create", "JSON: Can not construct') else: assert True == False def test_create_thumbnail_sizingpolicy_in_enum(self): \"\"\"create thumbnail", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist') else: assert True ==", "more than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png',", "self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def 
test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format", "auto mode') else: assert True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is", "end time less than 0\"\"\" source = {'key': self.key} capture = { 'mode':", "True == False, 'not throw BceServerError' def test_create_thumbnail_with_pipeline_none(self): \"\"\"create thumbnail with pipeline none\"\"\"", "'not throw BceServerError' def test_create_thumbnail_with_pipeline_not_exist(self): \"\"\"create thumbnail with pipeline not exist\"\"\" source =", "source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail", "enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt', 'sizingPolicy': self.sizing_policy,", "from nose.tools import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\"", "source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with mode not in enum\"\"\" source", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with interval null\"\"\" source =", "source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key}", "key prefix is none\"\"\" source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id)", "\"\"\"create thumbnail withheight pixel equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix':", "'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp = 
self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError", "assert e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with", "resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source", "def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is none\"\"\" source = {'key': self.key}", "'png', 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)", "{'key': self.key} capture = { 'mode': 'auto', 'endTimeInSecond': 10 } try: resp =", "auto mode') else: assert True == False def test_create_thumbnail_mode_manual_with_null_endtime(self): \"\"\"create thumbnail mode is", "BceServerError): assert e.last_error.message.startswith( 'cannot specify start time, end time, interval or frame number", "thumbnail withheight pixel equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "'keep' self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second = 0", "'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "except ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should not be None') def test_create_thumbnail_with_pipeline_empty(self):", "else: assert True == False def test_create_thumbnail_interval_float(self): \"\"\"create thumbnail mode is manual with", "as e: if isinstance(e.last_error, BceServerError): assert 
e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def", "break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key': self.key}", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required in manual mode') else: assert", "'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more than 2000\"\"\" source = {'key':", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond':", "if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.intervalInSecond:capture.intervalInSecond') else: assert True == False def test_create_thumbnail_interval_float(self):", "source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read", "{ 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp =", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp =", "True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\" source =", "'auto', 'startTimeInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "e.last_error.message.startswith('Could not read JSON: Can not construct') else: assert True == False def", "key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) 
nose.tools.assert_is_not_none(resp) def", "with pipeline empty\"\"\" source = {'key': self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty", "succ = False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines()", "with key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp)", "self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': self.interval_in_second } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target,", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('bos object: not_exist.mp4 does not", "thumbnail with start time less than 0\"\"\" source = {'key': self.key} capture =", "mode is auto with end time\"\"\" source = {'key': self.key} capture = {", "e.last_error.message.startswith( 'capture.endTimeInSecond:capture.endTimeInSecond') else: assert True == False def test_create_thumbnail_end_time_float(self): \"\"\"create thumbnail with end", "assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=') else: assert True == False def test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width", "= 'keep' self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second =", "thumbnail with width pixel equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix':", "else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key':", "'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError", 
"\"\"\"create thumbnail mode is manual with interver float\"\"\" source = {'key': self.key} capture", "nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\" source = {'key': 'not_exist.mp4'}", "import tools from nose.tools import assert_raises from nose.tools import assert_is_none from nose.tools import", "{ 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel", "= {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 }", "self.mode = 'manual' self.start_time_in_second = 0 self.end_time_in_second = 50 self.interval_in_second = 10 self.client", "succ = True try: resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) except Exception as e:", "resp.job_status != 'FAILED': print('please wait ....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def", "test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\" source = {'key': self.key} capture =", "assert e.last_error.message.startswith('start time cannot larger than end time') else: assert True == False", "with height pixel more than 2000\"\"\" source = {'key': self.key} target = {'keyPrefix':", "capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual with endtime less than", "test_create_thumbnail_widthinpixel_morethan_2000(self): \"\"\"create thumbnail with width pixel more than 2000\"\"\" source = {'key': self.key}", "target = 
{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, }", "True == False def test_create_thumbnail_start_time_lessthan_0(self): \"\"\"create thumbnail with start time less than 0\"\"\"", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail with height pixel less", "'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def", "source = {'key': self.key} capture = { 'mode': 'auto', 'intervalInSecond': 10 } try:", "end time') else: assert True == False def test_create_thumbnail_mode_manual_endtime_null(self): \"\"\"create thumbnail mode is", "e.last_error.message.startswith( 'cannot specify start time, end time, interval or frame number in auto", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 5, } try: resp", "thumbnail normal\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy':", "= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is none\"\"\"", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try:", "manual with interval null\"\"\" source = {'key': '测试视频.mp4'} capture = { 'mode': 'manual',", "} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode", "thumbnail mode is 
auto with interval time\"\"\" source = {'key': self.key} capture =", "def test_create_thumbnail_normal(self): \"\"\"create thumbnail normal\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel", "def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key is multiple chars\"\"\" self.key = 'job_测试_123.mp4'", "\"\"\"create thumbnail mode is manual with endtime null\"\"\" source = {'key': self.key} capture", "nose import tools from nose.tools import assert_raises from nose.tools import assert_is_none from nose.tools", "pipeline does not exist') else: assert True == False, 'not throw BceServerError' def", "True == False def test_create_thumbnail_mode_auto_with_interval(self): \"\"\"create thumbnail mode is auto with interval time\"\"\"", "name\"\"\" source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create", "'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def", "manual with endtime less than start time\"\"\" source = {'key': self.key} capture =", "interval null\"\"\" source = {'key': '测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':", "'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error,", "self.key_prefix = '/00mingxioutput' self.target_format = 'jpg' self.sizing_policy = 'keep' self.width_in_pixel = 640 self.height_in_pixel", "resp 
= self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key", "source = {'key': self.key} capture = {'mode': 'notmode'} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "source = {'key': self.key} capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "{'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create thumbnail with", "capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_not_in_enum(self): \"\"\"create", "source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is", "e.last_error.message.startswith('Could not read JSON: Can not') else: assert True == False def test_create_thumbnail_start_time_lessthan_0(self):", "chars\"\"\" self.key = 'job_测试_123.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp)", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5", "start time float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':", "source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required in manual mode') else:", 
"media_client.MediaClient(media_config.config) def setUp(self): \"\"\"create env\"\"\" time.sleep(2) succ = True try: resp = self.client.create_pipeline(self.pipeline_name,", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel,", "self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode':", "source, capture=capture) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith( 'capture.startTimeInSecond:capture.startTimeInSecond') else:", "print(e.message) succ = False finally: nose.tools.assert_true(succ) def tearDown(self): \"\"\"clear env\"\"\" time.sleep(2) resp =", "True == False def test_create_thumbnail_mode_auto_with_endtime(self): \"\"\"create thumbnail mode is auto with end time\"\"\"", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could", "'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source,", "{ 'mode': 'manual', 'startTimeInSecond':100, 'intervalInSecond': 5 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id)", "{'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 } resp", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, } resp", "{'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 
'heightInPixel': self.height_in_pixel, } capture =", "isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else: assert True == False, 'not", "capture = { 'mode': 'manual', 'startTimeInSecond':20, 'endTimeInSecond':10, 'intervalInSecond': 5 } try: resp =", "with interver float\"\"\" source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond':1,", "self.sizing_policy = 'keep' self.width_in_pixel = 640 self.height_in_pixel = 400 self.mode = 'manual' self.start_time_in_second", "is auto with end time\"\"\" source = {'key': self.key} capture = { 'mode':", "thumbnail with key not exist\"\"\" source = {'key': 'not_exist.mp4'} try: self.client.create_thumbnail_job(self.pipeline_name, source) except", "required in manual mode') else: assert True == False def test_create_thumbnail_end_time_lessthan_0(self): \"\"\"create thumbnail", "be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" source = {'key': self.key}", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('Could not read JSON: Can not construct') else:", "self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_is_multiple_chars(self): \"\"\"create thumbnail job with key", "self.key} capture = {'startTimeInSecond': -1} try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError", "self.key} with nose.tools.assert_raises_regexp(BceClientError, 'pipeline_name can\\'t be empty string'): self.client.create_thumbnail_job('', source) def test_create_thumbnail_with_key_is_chiness(self): \"\"\"create", "\"\"\"create thumbnail mode is auto with end time\"\"\" source = {'key': self.key} capture", "'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode, 
'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second,", "not in enum\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format': 'txt',", "assert e.last_error.message.startswith( 'cannot specify start time, end time, interval or frame number in", "self.sourceBucket, self.targetBucket) except Exception as e: print(e.message) succ = False finally: nose.tools.assert_true(succ) def", "test_create_thumbnail_keyprefix_keydot(self): \"\"\"create thumbnail with key prefix key dot\"\"\" source = {'key': 'test.thumbnail.csdn.mp4'} resp", "test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is none\"\"\" source = {'key': self.key} resp", "'heightInPixel': 2000, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_heightinpixel_lessthan_10(self): \"\"\"create thumbnail", "import raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre", "not be None') def test_create_thumbnail_with_pipeline_empty(self): \"\"\"create thumbnail with pipeline empty\"\"\" source = {'key':", "= {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2001, } try:", "tools from nose.tools import assert_raises from nose.tools import assert_is_none from nose.tools import raises", "print('please wait ....\\n') time.sleep(5) else: break resp = self.client.delete_pipeline(pipeline_name) def test_create_thumbnail_normal(self): \"\"\"create thumbnail", "nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end time\"\"\" source = {'key':", "{'key': self.key} capture = { 'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, 
source,", "'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "target = {'keyPrefix': self.key_prefix, 'format': self.target_format, 'sizingPolicy': self.sizing_policy, 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, }", "self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create", "'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as e:", "does not exist') else: assert True == False, 'not throw BceServerError' def test_create_thumbnail_with_key_include_folder(self):", "self.key} target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'notsizing', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel,", "def test_create_thumbnail_with_pipeline_deleted(self): \"\"\"create thumbnail with delete pipeline\"\"\" resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) source =", "e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=') else: assert True == False def test_create_thumbnail_heightinpixel_morethan_2000(self):", "none\"\"\" source = {'key': self.key} capture = { 'mode': 'manual' } try: resp", "{'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp =", "withheight pixel equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix, 'format':", "= { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name, 
source, capture=capture)", "manual with end time none\"\"\" source = {'key': self.key} capture = { 'mode':", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self): \"\"\"create thumbnail mode is manual", "{ 'mode': 'auto', 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except", "def test_create_thumbnail_mode_manual_interval_null(self): \"\"\"create thumbnail mode is manual with interval null\"\"\" source = {'key':", "self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name,", "test_create_thumbnail_mode_manual_none_starttime(self): \"\"\"create thumbnail mode is manual with start time is none\"\"\" source =", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_interval_less_0(self): \"\"\"create thumbnail mode is", "'mode': 'manual', 'startTimeInSecond': 10 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self):", "resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_keyprefix_none(self): \"\"\"create thumbnail with key prefix is", "source = {'key': self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1, 'endTimeInSecond': 48.34,", "is auto\"\"\" source = {'key': self.key} capture = {'mode': 'auto'} resp = self.client.create_thumbnail_job(self.pipeline_name,", "False def test_create_thumbnail_heightinpixel_morethan_2000(self): \"\"\"create thumbnail with height pixel 
more than 2000\"\"\" source =", "not in enum\"\"\" source = {'key': self.key} capture = {'mode': 'notmode'} try: resp", "= self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_sizingpolicy_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\"", "{'key': self.key} capture = { 'mode': 'auto', 'startTimeInSecond': 10 } try: resp =", "{ 'mode': 'manual' } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "source = {'key': '测试视频.mp4'} capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 }", "= { 'mode': 'auto', 'startTimeInSecond': 0, 'endTimeInSecond': -1, 'intervalInSecond': 10 } try: resp", "target = {'keyPrefix': self.key_prefix, 'format': 'png', 'sizingPolicy': 'shrinkToFit', 'widthInPixel': self.width_in_pixel, 'heightInPixel': 2000, }", "capture = { 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond':10, 'intervalInSecond': 1.56 } resp = self.client.create_thumbnail_job(self.pipeline_name,", "self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as e: assert e.message.startswith('arg \"pipeline_name\" should not", "'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self): \"\"\"create thumbnail", "{ 'mode': 'manual', 'startTimeInSecond':10, 'endTimeInSecond': 20 } resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) print(resp)", "height pixel less than 10\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "try: self.client.create_thumbnail_job(self.pipeline_name, source) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has", "is none\"\"\" source = 
{'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name, source) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_keyprefix_keydot(self):", "with width pixel equal 2000\"\"\" source = {'key': self.key} target = {'keyPrefix': self.key_prefix,", "raises class TestCreateThumbnail(mediaBase.MediaBase): \"\"\"test create thumbnail\"\"\" def __init__(self): \"\"\"construction \"\"\" mediaBase.MediaBase.__init__(self) self.pre =", "thumbnail with key include folder\"\"\" source = {'key': 'media/info/jobtest.mp4'} resp = self.client.create_thumbnail_job(self.pipeline_name, source)", "self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_format_not_in_enum(self): \"\"\"create thumbnail format not in enum\"\"\" source", "import nose from nose import tools from nose.tools import assert_raises from nose.tools import", "'mode': 'manual', 'startTimeInSecond':1, 'endTimeInSecond':50, 'intervalInSecond': -1 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)", "pipeline none\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job(None, source) except ValueError as e:", "coding: utf-8 -*- ######################################################################## # # Copyright 2015 Baidu, Inc. 
# ######################################################################## \"\"\"", "False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\" source = {'key': self.key}", "else: assert True == False def test_create_thumbnail_start_time_float(self): \"\"\"create thumbnail with start time float\"\"\"", "self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_start_equal_end(self): \"\"\"create thumbnail start time equal end time\"\"\"", "specify start time, end time, interval or frame number in auto mode') else:", "_COMMON_PATH = _NOW_PATH + '../../../' sys.path.insert(0, _COMMON_PATH) from baidubce.services.media import media_client from baidubce.exception", "source) nose.tools.assert_is_not_none(resp) def test_create_thumbnail_with_key_not_exist(self): \"\"\"create thumbnail with key not exist\"\"\" source = {'key':", "else: assert True == False def test_create_thumbnail_mode_is_auto(self): \"\"\"create thumbnail with mode is auto\"\"\"", "\"\"\" mediaBase.MediaBase.__init__(self) self.pre = self.prefix + 'createthumb' self.pipeline_name = self.pre self.container = 'mp4'", "Date: 2015/06/10 15:15:40 \"\"\" import os import sys import unittest import json import", "not exist\"\"\" source = {'key': self.key} try: self.client.create_thumbnail_job('not_exist_pipeline', source) except BceHttpClientError as e:", "(pipeline_name.startswith(self.pre)): resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name) if resp.thumbnails: for each_job in resp.thumbnails: while(1): resp =", "'auto', 'intervalInSecond': 10 } try: resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) except BceHttpClientError as", "'heightInPixel': self.height_in_pixel, } capture = {'mode': self.mode, 'startTimeInSecond': self.start_time_in_second, 'endTimeInSecond': self.end_time_in_second, 'intervalInSecond': 
self.interval_in_second", "self.key} capture = { 'mode': 'manual', 'startTimeInSecond': 1.25, 'endTimeInSecond': 50, 'intervalInSecond': 10} resp", "test_create_thumbnail_format_png(self): \"\"\"create thumbnail with png pic\"\"\" source = {'key': self.key} target = {'keyPrefix':", "BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('pipeline has been deleted') else: assert", "\"\"\"clear env\"\"\" time.sleep(2) resp = self.client.list_pipelines() for each_pipeline in resp.pipelines: pipeline_name = each_pipeline.pipeline_name", "except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not", "resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_mode_auto_with_starttime(self): \"\"\"create thumbnail mode is auto", "'widthInPixel': 2000, 'heightInPixel': self.height_in_pixel, } resp = self.client.create_thumbnail_job(self.pipeline_name, source, target) nose.tools.assert_is_not_none(resp.job_id) def test_create_thumbnail_widthinpixel_lessthan_10(self):", "key is chiness\"\"\" self.key = 'test--*--中文.mp4' source = {'key': self.key} resp = self.client.create_thumbnail_job(self.pipeline_name,", "as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('start time is required in manual mode')" ]
[]
[ "position: relative; float: left; margin: 0 105px 40px 105px; } .sb-side-right { float:", "position: relative; overflow: hidden; } /* アイコン画像 */ .icon-img { position: absolute; overflow:", "'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "{ content: \"\"; position: absolute; border-style: solid; top: 16px; z-index: 3; } .sb-txt:after", ".sb-txt-left:after { left: -10px; border-width: 8px 10px 8px 0; border-color: transparent #eee transparent", "soul == 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking']", "soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking']", "content: \"\"; position: absolute; border-style: solid; top: 15px; z-index: 2; } /* 吹き出しの三角(左)", "re import time import random import IPython from google.colab import output n =", "frame and 'month' in frame and 'day' in frame and 'type' in frame", "'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "== 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] =", "0 105px 40px 105px; } .sb-side-right { float: right; } /* 吹き出し内のテキスト */", "[s for s in number if m.match(s)] number = list(map(int, result)) #sn =", "2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku'", "*/ .sb-txt { 
padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style> <script> var", "{bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box --> '''))", "def number(x): number = list(x) number = [''.join( x for x in number", "sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box -->", "8px 10px; border-color: transparent transparent transparent #eee; } /* 767px(iPad)以下 */ @media (max-width:", "'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right", "# asking から更新する frame[frame['asking']] = input_text del frame['asking'] if 'name' not in frame:", "= list(map(int, result)) #sn = sum(int(c) for c in number) return len(number) def", "= kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!--", "del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year'", "in '\\n') pattern = r'\\d\\d' result = re.match(pattern, Match) if result == None:", "} /* 吹き出し内のテキスト */ .sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } }", "class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box", "== 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] =", "'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == 
'\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))", "'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "soul == 22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking']", "in frame and 'month' in frame and 'day' in frame and 'type' not", "1 return 'ほ' * n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png'", "== 8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] =", "'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' 
if 'name' in frame and 'year' in frame and 'month'", "0: # ゾロ目の時 return sn if sn > 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う", "and 'month' in frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'): del", "} /* 吹き出し内のテキスト */ .sb-txt { position: relative; border: 2px solid #eee; border-radius:", "frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame", "elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33:", "list(x) Match = ''.join( x for x in Match if x not in", "frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return", "not None: display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame = {}", "sn > 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global frame", "not in frame: frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?' 
if frame['name'] ==", "in frame and 'year' in frame and 'month' in frame and (number(frame['day']) !=", "== 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] =", "+ list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "105px; } .sb-side-right { float: right; } /* 吹き出し内のテキスト */ .sb-txt { position:", "== 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] =", "frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return", "'\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1:", "return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "{ width: 60px; top: 62px; font-size: 9px; } /* 吹き出し(左) */ .sb-side-left {", "== 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] =", "#eee; color: #333; font-size: 15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type { padding-bottom:", "and (number(frame['day']) != 2 or match(frame['day']) == 'None'): del frame['day'] frame['asking'] = 'day'", "*/ 
.sb-txt-left:before { left: -7px; border-width: 7px 10px 7px 0; border-color: transparent #eee", "border-width: 8px 10px 8px 0; border-color: transparent #eee transparent transparent; } /* 吹き出しの三角(右)", "} .sb-txt:after { content: \"\"; position: absolute; border-style: solid; top: 15px; z-index: 2;", "return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C')", "result = re.match(pattern, Match) if result == None: return 'None' def soulnumber(X): number", "'year' in frame and 'month' not in frame: frame['asking'] = 'month' # 誕生月をたずねる", "60px; } /* アイコンネーム */ .icon-name { width: 60px; top: 62px; font-size: 9px;", "33: frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if", "frame and 'type' not in frame: # 占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。'", "'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number =", "global frame # 外部の状態を参照する if 'asking' in frame: # asking から更新する frame[frame['asking']] =", "'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", 
"'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "height: 60px; } /* アイコンネーム */ .icon-name { width: 60px; top: 62px; font-size:", "[''.join( x for x in number if x not in '\\n')] number =", "11 == 0: # ゾロ目の時 return sn if sn > 9: #2桁の時は return", "with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\">", "and 'year' not in frame: frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if", "if start is not None: display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking'", "!= 2 or match(frame['day']) == 'None'): del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる", "sum([[*word] for word in number], []) m = re.compile('^[0-9]+$') result = [s for", "frame: # 占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and", "誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year']) != 4 or match(frame['year'])", "if 'name' in frame and 'year' not in frame: frame['asking'] = 'year' #", "frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' if 'name' in frame and 'year'", "frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return", "frame: frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?' 
if frame['name'] == '\\n': del", "output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div", "del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year'", "'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in frame and 'year' in frame and 'month' in frame", "soul == 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking']", "= 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "and 'manzoku' not in frame: if frame['type'] == '\\nA': #number = list(frame['year']) +", "frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return", "frame and 'day' not in frame: frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。'", "'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "/* アイコン画像(左) */ .icon-img-left { left: 0; } /* アイコン画像(右) */ .icon-img-right {", "{ padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style> <script> var inputPane =", "sn if sn 
> 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text):", "6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku'", "in number if m.match(s)] number = list(map(int, result)) #sn = sum(int(c) for c", "import random import IPython from google.colab import output n = 0 def chat(text,", "class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right", "z-index: 3; } .sb-txt:after { content: \"\"; position: absolute; border-style: solid; top: 15px;", "not in '\\n') pattern = r'\\d\\d' result = re.match(pattern, Match) if result ==", "YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'):", "'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] =", "frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and", "62px; font-size: 9px; } /* 吹き出し(左) */ .sb-side-left { margin: 0 0 30px", "*/ } /* 吹き出し(右) */ .sb-side-right { margin: 0 78px 30px 0; /*", "0; margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before { content: \"\"; position: absolute;", "sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box -->", "chat(text, **kw): #チャット用の関数(ここを書き換える) global n n += 1 return 'ほ' * n #", "in frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'): del frame['year'] frame['asking']", 
"frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return", "hidden; } /* アイコン画像 */ .icon-img { position: absolute; overflow: hidden; top: 0;", "== 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] =", "soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking']", "'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else:", "return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'name' in frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'): del frame['year']", "'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku'", "{ margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */", "if 'name' in frame and 'year' in frame and (number(frame['month']) != 2 or", "0; } /* 吹き出しの三角 */ .sb-txt:before { content: \"\"; position: absolute; border-style: solid;", "= 'manzoku' return 
'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "frame: frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and", "} .sb-txt-right:after { right: -10px; border-width: 8px 0 8px 10px; border-color: transparent transparent", "1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku'", "soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return", "soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking']", "== 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] =", "= 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "sb-side-left --> </div><!-- /.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name',", "right: -10px; border-width: 8px 0 8px 10px; border-color: transparent transparent transparent #eee; }", "} /* アイコン画像(右) */ .icon-img-right { right: 0; } /* アイコン画像 */ .icon-img", "width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt", "} </style> <script> var 
inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode ==", "return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "= kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!--", "= 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number = list(frame['year']) + list(frame['month'])", "return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) +", "6px; background: #eee; color: #333; font-size: 15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type", "and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) >= 3: return", "frame['type'] != '\\nC': # 占います del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if", "sb-side-right --> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box {", "{ left: -10px; border-width: 8px 10px 8px 0; border-color: transparent #eee transparent transparent;", "in frame and 'year' not in frame: frame['asking'] = 'year' # 誕生年をたずねる return", "is not None: display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame =", "n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat,", "soul 
== 2: frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking']", "else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C') soul =", "'month' in frame and 'day' in frame and 'type' in frame and 'manzoku'", "number = list(map(int, result)) sn = sum(int(c) for c in number) if sn", "in frame and 'month' in frame and 'day' in frame and 'type' in", "return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "= soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku'", "and (number(frame['month']) != 2 or match(frame['month']) == 'None'): del frame['month'] frame['asking'] = 'month'", "= 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in", "if sn % 11 == 0: # ゾロ目の時 return sn if sn >", "Match) if result == None: return 'None' def soulnumber(X): number = [''.join( x", "= 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) +", "} /* アイコン画像 */ .icon-img { position: absolute; overflow: hidden; top: 0; width:", "</div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box { position: relative;", "for x in X if x not in '\\n')] number = sum([[*word] for", "elif soul == 2: 
frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3:", "content: \"\"; position: absolute; border-style: solid; top: 16px; z-index: 3; } .sb-txt:after {", "'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "'type' in frame and frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku']", "frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year' in", "/* 吹き出し内のテキスト */ .sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style>", "= 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] ==", "or match(frame['year']) == 'None'): del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。'", "4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku'", "right: -7px; border-width: 7px 0 7px 10px; border-color: transparent transparent transparent #eee; }", "{ border-radius: 50%; border: 2px solid #eee; } /* アイコンネーム */ .icon-name {", "'\\nA' and frame['type'] != '\\nB' and frame['type'] != '\\nC': # 占います del frame['type']", "del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame", "elif soul == 33: frame['asking'] = 'manzoku' return 
'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku'", "top: 15px; z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before { left: -7px; border-width:", "BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left", "= sum(int(c) for c in number) return len(number) def match(x): Match = list(x)", "elif soul == 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8:", "'\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking'] = 'type'", "display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not None: display_bot(start) # フレーム 状態をもつ辞書 #", "30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right { margin: 0", "= kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\">", ".sb-txt-right:before { right: -7px; border-width: 7px 0 7px 10px; border-color: transparent transparent transparent", "def uranai(input_text): global frame # 外部の状態を参照する if 'asking' in frame: # asking から更新する", "'year' in frame and 'month' in frame and 'day' in frame and 'type'", "left: 0; } /* アイコン画像(右) */ .icon-img-right { right: 0; } /* アイコン画像", "状態をもつ辞書 # 'name', 'birthday', 'asking' frame = {} TYPE = [] def number(x):", "吹き出し(右) */ .sb-side-right { margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ }", "/* 吹き出し内のテキスト */ .sb-txt { position: relative; border: 2px solid #eee; border-radius: 6px;", "list(frame['month']) + list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul", "#TYPE.append(frame['type']) del frame['type'] del 
frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in", "in frame and 'year' in frame and 'month' not in frame: frame['asking'] =", "--> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt", "frame and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) >= 3:", "sum(int(c) for c in number) if sn % 11 == 0: # ゾロ目の時", "inputPane.value='' } }); </script> <div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width:", "frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return", "float: right; } /* 吹き出し内のテキスト */ .sb-txt { position: relative; border: 2px solid", "in frame and frame['manzoku'] == '\\nN' and len(TYPE) >= 3: return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!' 
return", "外部の状態を参照する if 'asking' in frame: # asking から更新する frame[frame['asking']] = input_text del frame['asking']", "18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before", "'正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month' in frame", "{ .icon-img { width: 60px; height: 60px; } /* アイコンネーム */ .icon-name {", "#2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global frame # 外部の状態を参照する if", "'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "frame and frame['manzoku'] == '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del", "return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right {", "pattern = r'\\d\\d' result = re.match(pattern, Match) if result == None: return 'None'", "'type' not in frame: # 占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name'", "== 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] =", "number = sum([[*word] for word in number], []) m = re.compile('^[0-9]+$') result =", "(e) => { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } });", "5: frame['asking'] = 'manzoku' return 
'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku'", "= 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "random import IPython from google.colab import output n = 0 def chat(text, **kw):", "frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return", "asking から更新する frame[frame['asking']] = input_text del frame['asking'] if 'name' not in frame: frame['asking']", "top: 16px; z-index: 3; } .sb-txt:after { content: \"\"; position: absolute; border-style: solid;", "吹き出しの三角(右) */ .sb-txt-right:before { right: -7px; border-width: 7px 0 7px 10px; border-color: transparent", "padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style> <script> var inputPane = document.getElementById('input');", "= 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in", ".sb-side-left { margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右)", "# 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year' in frame and", "frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return", "background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert',", "frame['asking'] = 'manzoku' return 
'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return", "22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku'", "frame and 'year' in frame and 'month' in frame and 'day' in frame", "'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon',", "in frame and 'month' not in frame: frame['asking'] = 'month' # 誕生月をたずねる return", "n += 1 return 'ほ' * n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON", "'name' in frame and 'year' in frame and (number(frame['month']) != 2 or match(frame['month'])", ".icon-img-right { right: 0; } /* アイコン画像 */ .icon-img img { border-radius: 50%;", "frame # 外部の状態を参照する if 'asking' in frame: # asking から更新する frame[frame['asking']] = input_text", "if m.match(s)] number = list(map(int, result)) sn = sum(int(c) for c in number)", "return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "7px 10px; border-color: transparent transparent transparent #eee; } .sb-txt-right:after { right: -10px; border-width:", "= 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: 
frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name'", "and 'type' in frame and frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN': del", "margin: 0 105px 40px 105px; } .sb-side-right { float: right; } /* 吹き出し内のテキスト", "if 'name' in frame and 'year' in frame and 'month' in frame and", "in number if m.match(s)] number = list(map(int, result)) sn = sum(int(c) for c", "frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'): del frame['year'] frame['asking'] =", "frame['type'] == '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul", "border-color: transparent transparent transparent #eee; } /* 767px(iPad)以下 */ @media (max-width: 767px) {", "x in X if x not in '\\n')] number = sum([[*word] for word", ".icon-img img { border-radius: 50%; border: 2px solid #eee; } /* アイコンネーム */", "15px; z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before { left: -7px; border-width: 7px", "'name' not in frame: frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?' 
if frame['name']", "elif soul == 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22:", "class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box", "None: return 'None' def soulnumber(X): number = [''.join( x for x in X", "2: frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku'", "frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type']", "= 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking']", "return len(number) def match(x): Match = list(x) Match = ''.join( x for x", "'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "in frame: # asking から更新する frame[frame['asking']] = input_text del frame['asking'] if 'name' not", "import IPython from google.colab import output n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える)", "'\\nB' and frame['type'] != '\\nC': # 占います del frame['type'] frame['asking'] = 'type' return", "return 
'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "in frame and 'day' in frame and frame['type'] != '\\nA' and frame['type'] !=", "*/ .sb-side-right { margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /*", "33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if", "<div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side", "= 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year' in", "TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking']", "'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "} /* 767px(iPad)以下 */ @media (max-width: 767px) { .icon-img { width: 60px; height:", "--> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon',", "8px 10px 8px 0; border-color: transparent #eee transparent transparent; } /* 吹き出しの三角(右) */", "'day' in frame and 'type' not in frame: # 占います frame['asking'] = 'type'", "'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month'])", 
"frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return", "number if m.match(s)] number = list(map(int, result)) #sn = sum(int(c) for c in", "top: 62px; font-size: 9px; } /* 吹き出し(左) */ .sb-side-left { margin: 0 0", "100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text)", "list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "and frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku'", "transparent transparent #eee; } .sb-txt-right:after { right: -10px; border-width: 8px 0 8px 10px;", "アイコン画像 */ .icon-img { position: absolute; overflow: hidden; top: 0; width: 80px; height:", "--> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt", "== 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] =", "text-align: center; top: 83px; color: #fff; font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left", "{ google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script> <div id='output' style='background: #66d;'></div> <div", "len(number) def match(x): Match = list(x) Match = ''.join( x for x in", "<style> /* 全体 */ .sb-box { position: relative; overflow: hidden; } /* アイコン画像", "/* 吹き出し */ .sb-side { position: relative; float: left; margin: 0 105px 40px", "return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 
'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "<div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div>", "[] def number(x): number = list(x) number = [''.join( x for x in", "'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "Match = ''.join( x for x in Match if x not in '\\n')", "color: #333; font-size: 15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0;", "= [] def number(x): number = list(x) number = [''.join( x for x", "transparent #eee transparent transparent; } .sb-txt-left:after { left: -10px; border-width: 8px 10px 8px", "width: 80px; height: 80px; } /* アイコン画像(左) */ .icon-img-left { left: 0; }", "**kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not None: display_bot(start) # フレーム", "0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt {", "in frame and 'day' in frame and 'type' not in frame: # 占います", "margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before { content: \"\"; position: absolute; border-style:", "8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku'", "767px(iPad)以下 */ @media (max-width: 767px) { .icon-img { width: 60px; height: 60px; }", "*/ .icon-img { position: absolute; overflow: hidden; top: 0; width: 80px; height: 80px;", "return sn if sn > 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def", "0; width: 80px; height: 80px; } /* アイコン画像(左) */ .icon-img-left { left: 0;", "/* アイコン画像(右) */ 
.icon-img-right { right: 0; } /* アイコン画像 */ .icon-img img", "color: #fff; font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left { left: 0; }", "< 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if", "/* 吹き出しの三角(右) */ .sb-txt-right:before { right: -7px; border-width: 7px 0 7px 10px; border-color:", "== '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul ==", "\"\"; position: absolute; border-style: solid; top: 15px; z-index: 2; } /* 吹き出しの三角(左) */", "1.7; padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角", "= [s for s in number if m.match(s)] number = list(map(int, result)) sn", "10px; } /* アイコンネーム(左) */ .icon-name-left { left: 0; } /* アイコンネーム(右) */", "*/ .sb-side { position: relative; float: left; margin: 0 105px 40px 105px; }", "for c in number) if sn % 11 == 0: # ゾロ目の時 return", "in number) if sn % 11 == 0: # ゾロ目の時 return sn if", "TYPE = [] def number(x): number = list(x) number = [''.join( x for", "== '\\n': del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame", "and 'day' in frame and 'type' in frame and frame['manzoku'] == '\\nN' and", "15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; }", "'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'):", "# 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and", "/.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style>", "'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div 
class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\"", "= 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "in frame and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) <", "frame and 'manzoku' not in frame: if frame['type'] == '\\nA': #number = list(frame['year'])", "'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "{ right: 0; } /* 吹き出し */ .sb-side { position: relative; float: left;", "border-color: transparent transparent transparent #eee; } .sb-txt-right:after { right: -10px; border-width: 8px 0", "number) if sn % 11 == 0: # ゾロ目の時 return sn if sn", "return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku']", "and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' 
if 'name' in frame and 'year' in", "= 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year'])", "--> </div><!-- /.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた')", "c in number) return len(number) def match(x): Match = list(x) Match = ''.join(", "= r'\\d\\d' result = re.match(pattern, Match) if result == None: return 'None' def", "'正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month' in frame", "'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div", "if frame['type'] == '\\nA': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul", "soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking']", "def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start", "== 2: frame['asking'] = 'manzoku' return 
'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] =", "== 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] =", "x not in '\\n') pattern = r'\\d\\d' result = re.match(pattern, Match) if result", "match(frame['month']) == 'None'): del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if", "'\\nC': # 占います del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in", "chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not None: display_bot(start) #", "{} TYPE = [] def number(x): number = list(x) number = [''.join( x", "-7px; border-width: 7px 10px 7px 0; border-color: transparent #eee transparent transparent; } .sb-txt-left:after", "in number if x not in '\\n')] number = sum([[*word] for word in", "border: 2px solid #eee; } /* アイコンネーム */ .icon-name { position: absolute; width:", "class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left", "} /* 吹き出し(左) */ .sb-side-left { margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く", "0; border-color: transparent #eee transparent transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before { right:", "吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right { margin: 0 78px 30px 0;", "== 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] =", "'month' not in frame: frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name'", "'manzoku' return 
'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1:", "transparent #eee transparent transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before { right: -7px; border-width:", "= 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "'year' in frame and 'month' in frame and 'day' not in frame: frame['asking']", "= sum(int(c) for c in number) if sn % 11 == 0: #", "return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "-7px; border-width: 7px 0 7px 10px; border-color: transparent transparent transparent #eee; } .sb-txt-right:after", "return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global frame # 外部の状態を参照する if 'asking'", "m.match(s)] number = list(map(int, result)) sn = sum(int(c) for c in number) if", "'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "and frame['manzoku'] == '\\nN' and len(TYPE) >= 3: return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!' 
return output_text def", "frame: # asking から更新する frame[frame['asking']] = input_text del frame['asking'] if 'name' not in", "top: 0; width: 80px; height: 80px; } /* アイコン画像(左) */ .icon-img-left { left:", "0; } /* アイコン画像 */ .icon-img img { border-radius: 50%; border: 2px solid", "frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in", "{ position: relative; float: left; margin: 0 105px 40px 105px; } .sb-side-right {", "and 'type' in frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' if 'name' in", "frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year' in frame", "bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not None:", "+ list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if", "list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))", "</script> <div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div>", "number = list(x) number = [''.join( x for x in number if x", "} .sb-txt-left:after { left: -10px; border-width: 8px 10px 8px 0; border-color: transparent #eee", "= kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\">", "frame: if frame['type'] == '\\nA': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A')", "number if x not in '\\n')] number = sum([[*word] for word in number],", "{ left: 0; } /* アイコンネーム(右) */ .icon-name-right { right: 0; } /*", "transparent transparent 
transparent #eee; } .sb-txt-right:after { right: -10px; border-width: 8px 0 8px", "#eee transparent transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before { right: -7px; border-width: 7px", "78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt { padding:", "return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year' in frame and 'month' not", "not in frame: frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in", "*/ .icon-name { position: absolute; width: 80px; text-align: center; top: 83px; color: #fff;", "[inputPane.value], {}); inputPane.value='' } }); </script> <div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea", "{}); inputPane.value='' } }); </script> <div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input'", "0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right { margin:", "left; margin: 0 105px 40px 105px; } .sb-side-right { float: right; } /*", "'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku'", "3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' 
if 'name'", "soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking']", "'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "time import random import IPython from google.colab import output n = 0 def", "elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4:", "+ list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul ==", "'month' in frame and 'day' in frame and 'type' not in frame: #", "in frame: frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame", "class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left -->", "+= 1 return 'ほ' * n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON =", "'あなたの名前は?' 
if frame['name'] == '\\n': del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if", "22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku'", "frame and 'type' in frame and 'manzoku' not in frame: if frame['type'] ==", "frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\\nN'", "frame and 'day' in frame and 'type' not in frame: # 占います frame['asking']", "frame and 'month' not in frame: frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。'", ".sb-box { position: relative; overflow: hidden; } /* アイコン画像 */ .icon-img { position:", "= [''.join( x for x in X if x not in '\\n')] number", "if frame['name'] == '\\n': del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name'", "フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame = {} TYPE = [] def", "/* 767px(iPad)以下 */ @media (max-width: 767px) { .icon-img { width: 60px; height: 60px;", "and frame['type'] != '\\nC': # 占います del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。'", "elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22:", "soul == 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking']", "BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def", "result == None: return 'None' def soulnumber(X): number = 
[''.join( x for x", "= 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in", "= 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and", "0; border-color: transparent #eee transparent transparent; } .sb-txt-left:after { left: -10px; border-width: 8px", "frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return", "'asking' in frame: # asking から更新する frame[frame['asking']] = input_text del frame['asking'] if 'name'", "in frame and frame['manzoku'] == '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type']", "uranai(input_text): global frame # 外部の状態を参照する if 'asking' in frame: # asking から更新する frame[frame['asking']]", "= [''.join( x for x in number if x not in '\\n')] number", ".sb-txt:after { content: \"\"; position: absolute; border-style: solid; top: 15px; z-index: 2; }", "'name' in frame and 'year' in frame and 'month' not in frame: frame['asking']", "in frame: # 占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame", "<div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right --> </div><!--", "} /* アイコン画像(左) */ .icon-img-left { left: 0; } /* アイコン画像(右) */ .icon-img-right", "position: relative; border: 2px solid #eee; border-radius: 6px; background: #eee; color: #333; font-size:", "soul == 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking']", "'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del", "*/ .sb-txt:before { content: \"\"; position: absolute; 
border-style: solid; top: 16px; z-index: 3;", "your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\">", "</div><!-- /.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\">", "'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "9px; } /* 吹き出し(左) */ .sb-side-left { margin: 0 0 30px 78px; /*", "transparent #eee; } .sb-txt-right:after { right: -10px; border-width: 8px 0 8px 10px; border-color:", "== 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] =", "'名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year' not in frame: frame['asking'] = 'year'", "4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku'", "22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku'", "x not in '\\n')] number = sum([[*word] for word in number], []) m", "border-radius: 6px; background: #eee; color: #333; font-size: 15px; line-height: 1.7; padding: 18px; }", "in frame and 'day' in frame and 'type' in frame and 'manzoku' not", "number = list(map(int, result)) #sn = sum(int(c) for c in number) return len(number)", "8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku'", "for s in 
number if m.match(s)] number = list(map(int, result)) #sn = sum(int(c)", "number], []) m = re.compile('^[0-9]+$') result = [s for s in number if", "list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))", "'\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and", "len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!'", "display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right -->", "--> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'):", "return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' 
if 'name' in frame and 'year' in frame and 'month' in", "= 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in", "} }); </script> <div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%;", "elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5:", "right: 0; } /* 吹き出し */ .sb-side { position: relative; float: left; margin:", "/* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px", "= 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "<div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side", "frame['type'] != '\\nB' and frame['type'] != '\\nC': # 占います del frame['type'] frame['asking'] =", "else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number = list(frame['year'])", "誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month'", "6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku'", "} } </style> <script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode", "13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script> <div id='output' style='background: #66d;'></div>", "return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in 
frame and 'year' in frame and 'month' in", "src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div", ".sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before { content:", "return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month'", "} /* 吹き出しの三角(左) */ .sb-txt-left:before { left: -7px; border-width: 7px 10px 7px 0;", "frame and 'year' in frame and 'month' in frame and (number(frame['day']) != 2", "chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon =", "return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month' in", "33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if", "in frame and 'type' in frame and frame['manzoku'] != '\\nY' and frame['manzoku'] !=", "誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and (number(frame['month'])", "{ width: 60px; height: 60px; } /* アイコンネーム */ .icon-name { width: 60px;", "} .sb-side-right { float: right; } /* 吹き出し内のテキスト */ .sb-txt { position: relative;", "return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 
'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking']", "icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side", "r'\\d\\d' result = re.match(pattern, Match) if result == None: return 'None' def soulnumber(X):", "if 'asking' in frame: # asking から更新する frame[frame['asking']] = input_text del frame['asking'] if", "Match if x not in '\\n') pattern = r'\\d\\d' result = re.match(pattern, Match)", "frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return", "'year' in frame and 'month' in frame and 'day' in frame and frame['type']", "id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw)", "frame['asking'] = 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year'", "== 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] =", "return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "!= 4 or match(frame['year']) == 'None'): del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる", "!= '\\nC': # 占います del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name'", "/* アイコンネーム */ .icon-name { width: 60px; top: 62px; font-size: 9px; } /*", "'manzoku' return 
'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "/* 吹き出し内の上下左右の余白を-6px */ } } </style> <script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e)", "= 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "for x in number if x not in '\\n')] number = sum([[*word] for", "in number], []) m = re.compile('^[0-9]+$') result = [s for s in number", "{ right: -7px; border-width: 7px 0 7px 10px; border-color: transparent transparent transparent #eee;", "'None'): del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in", "= 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "right; } /* 吹き出し内のテキスト */ .sb-txt { position: relative; border: 2px solid #eee;", "frame and 'month' in frame and 'day' in frame and frame['type'] != '\\nA'", "*/ .sb-txt { position: relative; border: 2px solid #eee; border-radius: 6px; background: #eee;", "elif soul == 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5:", "'正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month' in frame", "*/ .sb-box { position: relative; overflow: hidden; } /* アイコン画像 */ .icon-img {", "{ float: right; } /* 吹き出し内のテキスト */ 
.sb-txt { position: relative; border: 2px", "frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in frame and 'year' in", "not in frame: frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in", "'manzoku' not in frame: if frame['type'] == '\\nA': #number = list(frame['year']) + list(frame['month'])", "in frame and 'day' in frame and 'type' in frame and frame['manzoku'] ==", "* n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat =", "5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku'", "kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img", "'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "relative; border: 2px solid #eee; border-radius: 6px; background: #eee; color: #333; font-size: 15px;", "{ position: absolute; width: 80px; text-align: center; top: 83px; color: #fff; font-size: 10px;", "in frame and 'manzoku' not in frame: if frame['type'] == '\\nA': #number =", "0; } /* アイコンネーム(右) */ .icon-name-right { right: 0; } /* 吹き出し */", "== 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] =", "soul == 2: frame['asking'] = 
'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking']", "s in number if m.match(s)] number = list(map(int, result)) #sn = sum(int(c) for", "= sum([[*word] for word in number], []) m = re.compile('^[0-9]+$') result = [s", "number(x): number = list(x) number = [''.join( x for x in number if", "== 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] =", "}); </script> <div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%; background:", "8px 0 8px 10px; border-color: transparent transparent transparent #eee; } /* 767px(iPad)以下 */", "frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year'", "10px; border-color: transparent transparent transparent #eee; } /* 767px(iPad)以下 */ @media (max-width: 767px)", "'type' in frame and 'manzoku' not in frame: if frame['type'] == '\\nA': #number", "X if x not in '\\n')] number = sum([[*word] for word in number],", "'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame", "#number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month'])", "'よかったです!また占いしにきてくださいね!' 
if 'name' in frame and 'year' in frame and 'month' in frame", "'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year' in frame and 'month'", "frame and 'year' in frame and 'month' not in frame: frame['asking'] = 'month'", "if soul == 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2:", "吹き出し内のテキスト */ .sb-txt { position: relative; border: 2px solid #eee; border-radius: 6px; background:", "'asking' frame = {} TYPE = [] def number(x): number = list(x) number", "icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!--", "== 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] =", "top: 83px; color: #fff; font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left { left:", "x in number if x not in '\\n')] number = sum([[*word] for word", "= re.compile('^[0-9]+$') result = [s for s in number if m.match(s)] number =", "4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku'", ".icon-name { position: absolute; width: 80px; text-align: center; top: 83px; color: #fff; font-size:", "'正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month' in frame", "'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "/.icon-img icon-img-left --> <div class=\"icon-name 
icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text}", "+ list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul ==", "icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!--", "frame['name'] == '\\n': del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in", "soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking']", ".icon-name { width: 60px; top: 62px; font-size: 9px; } /* 吹き出し(左) */ .sb-side-left", "list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1:", ".sb-txt { position: relative; border: 2px solid #eee; border-radius: 6px; background: #eee; color:", "'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "2px solid #eee; border-radius: 6px; background: #eee; color: #333; font-size: 15px; line-height: 1.7;", "'\\nA': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) +", "'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name =", "frame and 'day' in frame and 'type' in frame and frame['manzoku'] != '\\nY'", "11: frame['asking'] = 
'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku'", "#eee; border-radius: 6px; background: #eee; color: #333; font-size: 15px; line-height: 1.7; padding: 18px;", "soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking']", "frame and 'type' in frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' if 'name'", "-10px; border-width: 8px 10px 8px 0; border-color: transparent #eee transparent transparent; } /*", "elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33:", "return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number", "!= '\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。'", "アイコンネーム(右) */ .icon-name-right { right: 0; } /* 吹き出し */ .sb-side { position:", "frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return", "match(frame['year']) == 'None'): del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if", "# 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and", "} /* アイコンネーム(右) */ .icon-name-right { right: 0; } /* 吹き出し */ .sb-side", "icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!--", "soul == 4: frame['asking'] = 
'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking']", "*/ .icon-img img { border-radius: 50%; border: 2px solid #eee; } /* アイコンネーム", ".sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style> <script> var inputPane", "return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left { left: 0; } /* アイコンネーム(右)", "def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name',", "{ right: 0; } /* アイコン画像 */ .icon-img img { border-radius: 50%; border:", "2 or match(frame['month']) == 'None'): del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return", "if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script> <div id='output'", "= {} TYPE = [] def number(x): number = list(x) number = [''.join(", "for x in Match if x not in '\\n') pattern = r'\\d\\d' result", "frame['asking'] if 'name' not in frame: frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?'", ".icon-name-right { right: 0; } /* 吹き出し */ .sb-side { position: relative; float:", "--> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box { position:", "# 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and", "in frame and 'day' not in frame: frame['asking'] = 'day' # 誕生日をたずねる return", "83px; color: #fff; font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left { left: 0;", "import re import time import random import IPython from google.colab import output n", "line-height: 1.7; 
padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; } /*", "<div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div", "'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "n n += 1 return 'ほ' * n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png'", "9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku'", "= 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year' in frame", "frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year'])", "elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11:", "0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt { padding: 12px; /*", "height: 80px; } /* アイコン画像(左) */ .icon-img-left { left: 0; } /* アイコン画像(右)", "display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div", "*/ .icon-name-right { right: 0; } /* 吹き出し */ .sb-side { position: relative;", "convert(your_text): 
display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is", "<div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!--", "word in number], []) m = re.compile('^[0-9]+$') result = [s for s in", "'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year'", "in frame: if frame['type'] == '\\nA': #number = list(frame['year']) + list(frame['month']) + list(frame['day'])", "50%; border: 2px solid #eee; } /* アイコンネーム */ .icon-name { position: absolute;", "7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku'", "} .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before {", "elif soul == 8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9:", "*/ } } </style> <script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => {", "名前をたずねる return 'あなたの名前は?' 
if frame['name'] == '\\n': del frame['name'] frame['asking'] = 'name' return", "'birthday', 'asking' frame = {} TYPE = [] def number(x): number = list(x)", "(number(frame['month']) != 2 or match(frame['month']) == 'None'): del frame['month'] frame['asking'] = 'month' #", "border-color: transparent #eee transparent transparent; } .sb-txt-left:after { left: -10px; border-width: 8px 10px", "== 22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] =", "3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku'", "def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f'''", "position: absolute; border-style: solid; top: 15px; z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before", "return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "<div id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> '''))", "'year' in frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'): del frame['month']", "del frame['asking'] if 'name' not in frame: frame['asking'] = 'name' # 名前をたずねる return", "0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n += 1 return 'ほ' *", "width: 60px; height: 60px; } /* アイコンネーム */ .icon-name { width: 60px; top:", "'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame", "== '\\nY': return 'よかったです!また占いしにきてくださいね!' 
if 'name' in frame and 'year' in frame and", "in number) return len(number) def match(x): Match = list(x) Match = ''.join( x", "return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "吹き出し */ .sb-side { position: relative; float: left; margin: 0 105px 40px 105px;", "icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!--", "return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left -->", "soul == 8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking']", "= 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?' 
if frame['name'] == '\\n': del frame['name']", "and 'year' in frame and 'month' not in frame: frame['asking'] = 'month' #", "frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'): del frame['month'] frame['asking'] =", "!= '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame", "frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return", "list(map(int, result)) sn = sum(int(c) for c in number) if sn % 11", "0; } /* 吹き出し */ .sb-side { position: relative; float: left; margin: 0", "# 外部の状態を参照する if 'asking' in frame: # asking から更新する frame[frame['asking']] = input_text del", "import time import random import IPython from google.colab import output n = 0", "return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\"", "and 'year' in frame and 'month' in frame and 'day' in frame and", "elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22:", "吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px */", "{ margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */", "/* 吹き出し(右) */ .sb-side-right { margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */", "= 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 
'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right", "= 'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script> <div id='output' style='background: #66d;'></div> <div style='text-align:", "'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt { padding: 12px;", "frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number = list(frame['year']) +", "'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "4 or match(frame['year']) == 'None'): del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return", "inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value],", "アイコン画像(右) */ .icon-img-right { right: 0; } /* アイコン画像 */ .icon-img img {", "= 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year']) !=", "elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: 
frame['asking'] = 'manzoku'", "#sn = sum(int(c) for c in number) return len(number) def match(x): Match =", "in frame and 'type' not in frame: # 占います frame['asking'] = 'type' return", "frame and 'month' in frame and 'day' not in frame: frame['asking'] = 'day'", "@media (max-width: 767px) { .icon-img { width: 60px; height: 60px; } /* アイコンネーム", "'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' if 'name' in frame and 'year' in frame", "{ padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before { content: \"\";", "elif soul == 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11:", "アイコンネーム */ .icon-name { position: absolute; width: 80px; text-align: center; top: 83px; color:", "frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return", "8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku'", "'type' in frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' 
if 'name' in frame", "and 'month' in frame and 'day' in frame and 'type' in frame and", "占います del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and", "</div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体 */", "= 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "== 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] =", "return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and (number(frame['month']) !=", "{your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box --> '''))", "'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "absolute; overflow: hidden; top: 0; width: 80px; height: 80px; } /* アイコン画像(左) */", "kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img", "soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking']", "position: absolute; border-style: solid; top: 16px; z-index: 3; } .sb-txt:after { content: \"\";", "7px 10px 7px 0; border-color: transparent #eee transparent transparent; } .sb-txt-left:after { left:", "solid; top: 16px; z-index: 3; } .sb-txt:after { content: \"\"; position: absolute; 
border-style:", "soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking']", "= input_text del frame['asking'] if 'name' not in frame: frame['asking'] = 'name' #", "アイコンネーム */ .icon-name { width: 60px; top: 62px; font-size: 9px; } /* 吹き出し(左)", "re.match(pattern, Match) if result == None: return 'None' def soulnumber(X): number = [''.join(", "global n n += 1 return 'ほ' * n # アイコンの指定 BOT_ICON =", "'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角 */", "if sn > 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global", "return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year']) != 4 or match(frame['year']) ==", "=> { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script>", "and 'day' in frame and 'type' not in frame: # 占います frame['asking'] =", "list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul", "'day' in frame and 'type' in frame and frame['manzoku'] != '\\nY' and frame['manzoku']", ".sb-txt-left:before { left: -7px; border-width: 7px 10px 7px 0; border-color: transparent #eee transparent", "= 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 
'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "'None'): del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in", "*/ .icon-name { width: 60px; top: 62px; font-size: 9px; } /* 吹き出し(左) */", "= 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "80px; height: 80px; } /* アイコン画像(左) */ .icon-img-left { left: 0; } /*", "== 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] =", "% 11 == 0: # ゾロ目の時 return sn if sn > 9: #2桁の時は", "= [s for s in number if m.match(s)] number = list(map(int, result)) #sn", "in frame and 'type' in frame and 'manzoku' not in frame: if frame['type']", "sb-txt-right --> </div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /*", "'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame", "} /* アイコンネーム */ .icon-name { width: 60px; top: 62px; font-size: 9px; }", "= 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "border-style: solid; top: 16px; z-index: 3; } .sb-txt:after { content: \"\"; position: absolute;", "/* アイコン画像 */ .icon-img img { border-radius: 50%; border: 2px 
solid #eee; }", "frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and", "solid #eee; border-radius: 6px; background: #eee; color: #333; font-size: 15px; line-height: 1.7; padding:", "def match(x): Match = list(x) Match = ''.join( x for x in Match", "占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in", "'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "soul == 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking']", "number if m.match(s)] number = list(map(int, result)) sn = sum(int(c) for c in", "in frame and frame['type'] != '\\nA' and frame['type'] != '\\nB' and frame['type'] !=", "10px; border-color: transparent transparent transparent #eee; } .sb-txt-right:after { right: -10px; border-width: 8px", "{ left: -7px; border-width: 7px 10px 7px 0; border-color: transparent #eee transparent transparent;", "全体 */ .sb-box { position: relative; overflow: hidden; } /* アイコン画像 */ .icon-img", "border-width: 7px 0 7px 10px; border-color: transparent transparent transparent #eee; } .sb-txt-right:after {", "== 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] =", "not in frame: frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in", "誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month'", "'name', 'birthday', 'asking' frame 
= {} TYPE = [] def number(x): number =", "0 7px 10px; border-color: transparent transparent transparent #eee; } .sb-txt-right:after { right: -10px;", "kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img", "frame and frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] =", "'month' in frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'): del frame['day']", "= 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year' in frame and", "in frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'): del frame['month'] frame['asking']", "relative; overflow: hidden; } /* アイコン画像 */ .icon-img { position: absolute; overflow: hidden;", "your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img", "9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku'", "frame['manzoku'] == '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking']", "width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt", "= chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon", "== 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 
'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "in X if x not in '\\n')] number = sum([[*word] for word in", "frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return", "in '\\n')] number = sum([[*word] for word in number], []) m = re.compile('^[0-9]+$')", "frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return", "frame and 'month' in frame and 'day' in frame and 'type' not in", "if 'name' in frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'): del", "#eee transparent transparent; } .sb-txt-left:after { left: -10px; border-width: 8px 10px 8px 0;", "'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "# 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year']) != 4 or", "or match(frame['month']) == 'None'): del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。'", "'year' in frame and 'month' in frame and (number(frame['day']) != 2 or match(frame['day'])", "for word in number], []) m = re.compile('^[0-9]+$') result = [s for s", "elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3:", "'year' not in frame: frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name'", "None: display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame = {} TYPE", "</div><!-- /.sb-txt sb-txt-right 
--> </div><!-- /.sb-side sb-side-right --> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML('''", "return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year' not in frame: frame['asking'] =", "frame['type'] != '\\nA' and frame['type'] != '\\nB' and frame['type'] != '\\nC': # 占います", "center; top: 83px; color: #fff; font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left {", "</div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box --> ''')) def", "# 名前をたずねる return 'あなたの名前は?' if frame['name'] == '\\n': del frame['name'] frame['asking'] = 'name'", "'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type'])", "def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n += 1 return 'ほ' * n", "not in frame: # 占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in", "soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking']", "'name' in frame and 'year' in frame and 'month' in frame and (number(frame['day'])", "output n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n += 1", "x for x in number if x not in '\\n')] number = sum([[*word]", 
"'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "not in '\\n')] number = sum([[*word] for word in number], []) m =", "'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return", "= list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) +", "<div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right", "<script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13) {", "frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year' not", "(number(frame['day']) != 2 or match(frame['day']) == 'None'): del frame['day'] frame['asking'] = 'day' #", "list(x) number = [''.join( x for x in number if x not in", "アイコンネーム(左) */ .icon-name-left { left: 0; } /* アイコンネーム(右) */ .icon-name-right { right:", "0 8px 10px; border-color: transparent transparent transparent #eee; } /* 767px(iPad)以下 */ @media", "frame['asking'] = 'type' return 
'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in frame and 'year' in frame", "in frame: frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame", "# 占います del frame['type'] frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame", "**kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON)", "style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text):", "inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' }", "12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style> <script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown',", "re.compile('^[0-9]+$') result = [s for s in number if m.match(s)] number = list(map(int,", "'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "absolute; border-style: solid; top: 15px; z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before {", "frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return", "return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month' in", "return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 
'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "アイコン画像(左) */ .icon-img-left { left: 0; } /* アイコン画像(右) */ .icon-img-right { right:", "7px 0; border-color: transparent #eee transparent transparent; } .sb-txt-left:after { left: -10px; border-width:", "<img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\">", "アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw):", "return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "result)) #sn = sum(int(c) for c in number) return len(number) def match(x): Match", "in frame and 'type' in frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' 
if", "elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8:", "style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text =", "soulnumber(X): number = [''.join( x for x in X if x not in", "frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return", "/* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right { margin: 0 78px 30px", "= 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "'None'): del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in", "and 'month' in frame and 'day' not in frame: frame['asking'] = 'day' #", "font-size: 9px; } /* 吹き出し(左) */ .sb-side-left { margin: 0 0 30px 78px;", "== '\\nB': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year'])", "and (number(frame['year']) != 4 or match(frame['year']) == 'None'): del frame['year'] frame['asking'] = 'year'", "frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'): del frame['day'] frame['asking'] =", "</div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\">", "float: left; margin: 0 105px 40px 105px; } .sb-side-right { float: right; }", "id='output' style='background: #66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def", "transparent 
transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before { right: -7px; border-width: 7px 0", "# 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and", "== 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] =", "*/ .icon-img-right { right: 0; } /* アイコン画像 */ .icon-img img { border-radius:", "#eee; } /* アイコンネーム */ .icon-name { position: absolute; width: 80px; text-align: center;", "frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\\nY':", "padding-bottom: 0; margin-bottom: 0; } /* 吹き出しの三角 */ .sb-txt:before { content: \"\"; position:", "'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and (number(frame['year']) != 4", "7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku'", "2 or match(frame['day']) == 'None'): del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return", "soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking']", "c in number) if sn % 11 == 0: # ゾロ目の時 return sn", "soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking']", "elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3:", "frame = {} TYPE = [] def number(x): number = list(x) number =", "soul 
= soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] =", "= list(x) number = [''.join( x for x in number if x not", "kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img", "/* アイコンネーム(左) */ .icon-name-left { left: 0; } /* アイコンネーム(右) */ .icon-name-right {", "'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC': TYPE.append('C') soul", "*/ .sb-side-left { margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /*", "= list(x) Match = ''.join( x for x in Match if x not", "+ list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "and 'day' in frame and frame['type'] != '\\nA' and frame['type'] != '\\nB' and", "frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return", "display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box { position: relative; overflow: hidden; } /*", "(max-width: 767px) { .icon-img { width: 60px; height: 60px; } /* アイコンネーム */", "solid #eee; } /* アイコンネーム */ .icon-name { position: absolute; width: 80px; text-align:", "</style> <script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13)", "!= 2 or match(frame['month']) == 'None'): del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる", "'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year' in frame and 'month' in 
frame", "in frame and 'year' in frame and 'month' in frame and 'day' in", "/* 全体 */ .sb-box { position: relative; overflow: hidden; } /* アイコン画像 */", "'manzoku' return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else:", "9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku'", "frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year'", "1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku'", "return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", ".icon-img-left { left: 0; } /* アイコン画像(右) */ .icon-img-right { right: 0; }", "soul == 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking']", "frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return", "{ right: -10px; border-width: 8px 0 8px 10px; border-color: transparent transparent transparent #eee;", "soul == 33: frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return", "border-width: 7px 10px 7px 0; border-color: transparent #eee transparent transparent; 
} .sb-txt-left:after {", "return 'あなたの名前は?' if frame['name'] == '\\n': del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。'", "list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "吹き出し(左) */ .sb-side-left { margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */ }", "list(map(int, result)) #sn = sum(int(c) for c in number) return len(number) def match(x):", "''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if", "font-size: 15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom: 0;", "ゾロ目の時 return sn if sn > 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn", "and 'month' in frame and 'day' in frame and frame['type'] != '\\nA' and", "60px; top: 62px; font-size: 9px; } /* 吹き出し(左) */ .sb-side-left { margin: 0", "'\\n': del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and", "} /* アイコンネーム */ .icon-name { position: absolute; width: 80px; text-align: center; top:", "== 33: frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "== 4: frame['asking'] = 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] =", "/.sb-side sb-side-right --> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box", "frame['asking'] = 'manzoku' return 
'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return", "+ list(frame['month']) + list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if", "if x not in '\\n')] number = sum([[*word] for word in number], [])", "から更新する frame[frame['asking']] = input_text del frame['asking'] if 'name' not in frame: frame['asking'] =", "x in Match if x not in '\\n') pattern = r'\\d\\d' result =", "frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type']", "convert) if start is not None: display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday',", "x for x in Match if x not in '\\n') pattern = r'\\d\\d'", "# 'name', 'birthday', 'asking' frame = {} TYPE = [] def number(x): number", "''.join( x for x in Match if x not in '\\n') pattern =", "soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking']", "} /* アイコンネーム(左) */ .icon-name-left { left: 0; } /* アイコンネーム(右) */ .icon-name-right", "7px 0 7px 10px; border-color: transparent transparent transparent #eee; } .sb-txt-right:after { right:", "border: 2px solid #eee; border-radius: 6px; background: #eee; color: #333; font-size: 15px; line-height:", "frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return", "--> ''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box { position: relative; overflow: hidden;", "return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: 
frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking']", "transparent #eee; } /* 767px(iPad)以下 */ @media (max-width: 767px) { .icon-img { width:", "{ position: relative; overflow: hidden; } /* アイコン画像 */ .icon-img { position: absolute;", "in frame and 'day' in frame and 'type' in frame and frame['manzoku'] !=", "''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON)", "frame and frame['manzoku'] == '\\nN' and len(TYPE) >= 3: return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!' return output_text", "m = re.compile('^[0-9]+$') result = [s for s in number if m.match(s)] number", "1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku'", "== '\\nA': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul = soulnumber(list(frame['year'])", "frame and frame['type'] != '\\nA' and frame['type'] != '\\nB' and frame['type'] != '\\nC':", "== 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] =", "elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6:", "2; } /* 吹き出しの三角(左) */ .sb-txt-left:before { left: -7px; border-width: 7px 10px 7px", "sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right --> </div><!-- /.sb-side sb-side-right -->", "frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year'", "= 'manzoku' return 
'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] ==", "'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking']", "and 'day' not in frame: frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if", "soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking']", "border-radius: 50%; border: 2px solid #eee; } /* アイコンネーム */ .icon-name { position:", "m.match(s)] number = list(map(int, result)) #sn = sum(int(c) for c in number) return", "bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\">", "= 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "x for x in X if x not in '\\n')] number = sum([[*word]", ".sb-txt:before { content: \"\"; position: absolute; border-style: solid; top: 16px; z-index: 3; }", "'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 
'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "class=\"sb-box\"> <div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-right --> <div class=\"icon-name", "'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] =", "soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return", "elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6:", "= 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "'day' in frame and 'type' in frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!'", "for c in number) return len(number) def match(x): Match = list(x) Match =", "[''.join( x for x in X if x not in '\\n')] number =", "+ list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7:", "frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name'", "left: 0; } /* アイコンネーム(右) */ 
.icon-name-right { right: 0; } /* 吹き出し", "60px; height: 60px; } /* アイコンネーム */ .icon-name { width: 60px; top: 62px;", "number) return len(number) def match(x): Match = list(x) Match = ''.join( x for", "frame['type'] == '\\nA': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul =", "/* アイコンネーム(右) */ .icon-name-right { right: 0; } /* 吹き出し */ .sb-side {", "left: -10px; border-width: 8px 10px 8px 0; border-color: transparent #eee transparent transparent; }", "not in frame: if frame['type'] == '\\nA': #number = list(frame['year']) + list(frame['month']) +", "= 'manzoku' return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "and 'year' in frame and 'month' in frame and 'day' not in frame:", "frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year' not in", "return 'None' def soulnumber(X): number = [''.join( x for x in X if", "} /* 吹き出しの三角 */ .sb-txt:before { content: \"\"; position: absolute; border-style: solid; top:", "= 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "frame and 'year' not in frame: frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。'", "s in number if m.match(s)] number = list(map(int, result)) sn = sum(int(c) for", "'name' in frame and 'year' in frame and 'month' in frame and 'day'", "class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name", "} /* 吹き出しの三角(右) */ .sb-txt-right:before { right: -7px; border-width: 7px 0 7px 10px;", "in frame and 'year' in frame and 
(number(frame['month']) != 2 or match(frame['month']) ==", "/* 吹き出し(左) */ .sb-side-left { margin: 0 0 30px 78px; /* 吹き出し(左)の上下左右の余白を狭く */", "吹き出し内の上下左右の余白を-6px */ } } </style> <script> var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) =>", "'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "background: #eee; color: #333; font-size: 15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type {", "frame and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) < 3:", "== 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] =", "elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5:", "8px 0; border-color: transparent #eee transparent transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before {", "== 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script> <div id='output' style='background:", "in frame and 'month' in frame and (number(frame['day']) != 2 or match(frame['day']) ==", "'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month' in frame", "== 11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] =", "if x not in '\\n') pattern = r'\\d\\d' result = re.match(pattern, Match) if", "and 'year' in frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'): del", "--> </div><!-- /.sb-side sb-side-right 
--> </div><!-- /.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体", "absolute; border-style: solid; top: 16px; z-index: 3; } .sb-txt:after { content: \"\"; position:", "5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku'", "'month' in frame and 'day' in frame and frame['type'] != '\\nA' and frame['type']", "frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and", "frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return", "return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year' in frame and 'month' in", "and 'type' in frame and 'manzoku' not in frame: if frame['type'] == '\\nA':", "10px 7px 0; border-color: transparent #eee transparent transparent; } .sb-txt-left:after { left: -10px;", "elif soul == 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4:", "'manzoku' return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nC':", "display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame = {} TYPE =", "</div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name", "elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8:", 
"class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div", "if 'name' in frame and 'year' in frame and 'month' not in frame:", "3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku'", "if frame['type'] == '\\nC': TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if", "'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number = list(frame['year']) + list(frame['month']) +", "'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "105px 40px 105px; } .sb-side-right { float: right; } /* 吹き出し内のテキスト */ .sb-txt", "result = [s for s in number if m.match(s)] number = list(map(int, result))", "'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "80px; text-align: center; top: 83px; color: #fff; font-size: 10px; } /* アイコンネーム(左) */", "bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div class=\"icon-img", "# フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame = {} TYPE = []", "40px 105px; } .sb-side-right { float: right; } /* 吹き出し内のテキスト */ .sb-txt {", "= 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year' in frame and", "if m.match(s)] number = list(map(int, result)) #sn = sum(int(c) for c in number)", ".icon-img { 
width: 60px; height: 60px; } /* アイコンネーム */ .icon-name { width:", "in frame and 'year' in frame and 'month' in frame and 'day' not", "elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7:", "== 2: frame['asking'] = 'manzoku' return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] =", "# ゾロ目の時 return sn if sn > 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return", "== 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] =", "frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year'", "'manzoku' return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB':", "frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return", "number = [''.join( x for x in X if x not in '\\n')]", "soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking']", "soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking']", "transparent transparent; } .sb-txt-left:after { left: -10px; border-width: 8px 10px 8px 0; border-color:", "'day' in frame and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE)", "'type' return 
'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month'", "16px; z-index: 3; } .sb-txt:after { content: \"\"; position: absolute; border-style: solid; top:", "elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9:", "アイコン画像 */ .icon-img img { border-radius: 50%; border: 2px solid #eee; } /*", "== 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] =", "'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) >= 3: return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!'", "return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month' in", "border-style: solid; top: 15px; z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before { left:", "frame and 'type' in frame and frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN':", "overflow: hidden; } /* アイコン画像 */ .icon-img { position: absolute; overflow: hidden; top:", "#eee; } /* 767px(iPad)以下 */ @media (max-width: 767px) { .icon-img { width: 60px;", "or match(frame['day']) == 'None'): del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。'", "solid; top: 15px; z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before { left: -7px;", "right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text,", "in frame: frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame", "display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) 
display(IPython.display.HTML(f''' <div", "return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "frame[frame['asking']] = input_text del frame['asking'] if 'name' not in frame: frame['asking'] = 'name'", "#eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert)", "and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking'] = 'type' return", "return sn def uranai(input_text): global frame # 外部の状態を参照する if 'asking' in frame: #", "elif soul == 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7:", "google.colab import output n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n", "elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4:", "list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "'day' in frame and frame['type'] != '\\nA' and frame['type'] != '\\nB' and frame['type']", "= 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n += 1 return 'ほ'", "-10px; border-width: 8px 0 8px 10px; border-color: transparent transparent transparent #eee; } /*", "吹き出しの三角 */ 
.sb-txt:before { content: \"\"; position: absolute; border-style: solid; top: 16px; z-index:", "return 'よかったです!また占いしにきてくださいね!' if 'name' in frame and 'year' in frame and 'month' in", "= ''.join( x for x in Match if x not in '\\n') pattern", "誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year' in frame and 'month'", "and frame['type'] != '\\nB' and frame['type'] != '\\nC': # 占います del frame['type'] frame['asking']", "'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "and 'type' not in frame: # 占います frame['asking'] = 'type' return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if", "# アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!',", "frame['asking'] = 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame", "2px solid #eee; } /* アイコンネーム */ .icon-name { position: absolute; width: 80px;", "'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "del frame['name'] frame['asking'] = 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year'", "frame['asking'] = 'manzoku' return 
'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return", "</div><!-- /.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon", "'\\nB': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) +", "'\\n')] number = sum([[*word] for word in number], []) m = re.compile('^[0-9]+$') result", "border-width: 8px 0 8px 10px; border-color: transparent transparent transparent #eee; } /* 767px(iPad)以下", "3; } .sb-txt:after { content: \"\"; position: absolute; border-style: solid; top: 15px; z-index:", "#66d;'></div> <div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text)", "number = [''.join( x for x in number if x not in '\\n')]", "80px; } /* アイコン画像(左) */ .icon-img-left { left: 0; } /* アイコン画像(右) */", "match(frame['day']) == 'None'): del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if", "TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking']", "class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text} </div><!-- /.sb-txt sb-txt-right -->", "== 'None'): del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name'", "} /* アイコン画像 */ .icon-img img { border-radius: 50%; border: 2px solid #eee;", "= document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {});", "z-index: 2; } /* 吹き出しの三角(左) */ .sb-txt-left:before { 
left: -7px; border-width: 7px 10px", "frame and 'year' in frame and 'month' in frame and 'day' not in", "'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month'", "elif soul == 9: frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11:", "frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in", "in frame and frame['manzoku'] == '\\nY': return 'よかったです!また占いしにきてくださいね!' if 'name' in frame and", ".sb-side { position: relative; float: left; margin: 0 105px 40px 105px; } .sb-side-right", "11: frame['asking'] = 'manzoku' return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku'", "<div class=\"sb-box\"> <div class=\"icon-img icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div", "'month' in frame and 'day' not in frame: frame['asking'] = 'day' # 誕生日をたずねる", "sn % 11 == 0: # ゾロ目の時 return sn if sn > 9:", "== 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] =", "'\\n') pattern = r'\\d\\d' result = re.match(pattern, Match) if result == None: return", "== 7: frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] =", "= chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not None: display_bot(start)", "767px) { .icon-img { width: 60px; height: 60px; } /* アイコンネーム */ .icon-name", "{ content: \"\"; position: absolute; border-style: solid; top: 15px; z-index: 2; } /*", "if soul == 1: frame['asking'] = 'manzoku' 
return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2:", "<img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\">", "/.sb-box --> ''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box { position: relative; overflow:", "and frame['manzoku'] == '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku']", "frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return", "/.sb-side sb-side-left --> </div><!-- /.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name =", "= 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value=''", "= re.match(pattern, Match) if result == None: return 'None' def soulnumber(X): number =", "'\\nY': return 'よかったです!また占いしにきてくださいね!' 
if 'name' in frame and 'year' in frame and 'month'", "#333; font-size: 15px; line-height: 1.7; padding: 18px; } .sb-txt>p:last-of-type { padding-bottom: 0; margin-bottom:", "= 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text):", "return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and", "and frame['type'] != '\\nA' and frame['type'] != '\\nB' and frame['type'] != '\\nC': #", "'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "relative; float: left; margin: 0 105px 40px 105px; } .sb-side-right { float: right;", "and 'year' in frame and 'month' in frame and (number(frame['day']) != 2 or", "and 'day' in frame and 'type' in frame and frame['manzoku'] == '\\nY': return", "'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month' in frame", "吹き出し内のテキスト */ .sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ } } </style> <script>", "elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9:", "'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "'ほ' * n # 
アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat", "== 0: # ゾロ目の時 return sn if sn > 9: #2桁の時は return soulnumber(str(sn))", "/.sb-box --> ''')) def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon =", "sn = sum(int(c) for c in number) if sn % 11 == 0:", "!= '\\nA' and frame['type'] != '\\nB' and frame['type'] != '\\nC': # 占います del", "7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku'", "<div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left", "/* 吹き出しの三角(左) */ .sb-txt-left:before { left: -7px; border-width: 7px 10px 7px 0; border-color:", "return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number = list(frame['year']) + list(frame['month']) + list(frame['day'])", "'name' in frame and 'year' not in frame: frame['asking'] = 'year' # 誕生年をたずねる", "<div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text", "frame: frame['asking'] = 'year' # 誕生年をたずねる return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and", "run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master')", "<div class=\"icon-img icon-img-right\"> <img src=\"{your_icon}\" width=\"60px\"> </div><!-- /.icon-img 
icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div>", "'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", ".sb-side-right { float: right; } /* 吹き出し内のテキスト */ .sb-txt { position: relative; border:", "== None: return 'None' def soulnumber(X): number = [''.join( x for x in", "border-color: transparent #eee transparent transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before { right: -7px;", "= 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "if frame['type'] == '\\nB': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul", "in frame and frame['manzoku'] != '\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking']", "#eee; } .sb-txt-right:after { right: -10px; border-width: 8px 0 8px 10px; border-color: transparent", "and 'day' in frame and 'type' in frame and frame['manzoku'] != '\\nY' and", "'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト */ .sb-txt", "'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "= 'name' # 名前をたずねる return 'あなたの名前は?' 
if frame['name'] == '\\n': del frame['name'] frame['asking']", "time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not None: display_bot(start) # フレーム 状態をもつ辞書", "and 'month' not in frame: frame['asking'] = 'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if", "frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return", "hidden; top: 0; width: 80px; height: 80px; } /* アイコン画像(左) */ .icon-img-left {", "== 7: frame['asking'] = 'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] =", "10px 8px 0; border-color: transparent #eee transparent transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before", "transparent transparent transparent #eee; } /* 767px(iPad)以下 */ @media (max-width: 767px) { .icon-img", "*/ @media (max-width: 767px) { .icon-img { width: 60px; height: 60px; } /*", "def display_you(your_text): with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f'''", "'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month'", "elif soul == 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6:", "start is not None: display_bot(start) # フレーム 状態をもつ辞書 # 'name', 'birthday', 'asking' frame", "'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "frame and 'day' in frame and 
frame['type'] != '\\nA' and frame['type'] != '\\nB'", "/* アイコンネーム */ .icon-name { position: absolute; width: 80px; text-align: center; top: 83px;", "src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side sb-side-left\"> <div", "frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return", "'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else:", "soul == 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking']", "for s in number if m.match(s)] number = list(map(int, result)) sn = sum(int(c)", "0; } /* アイコン画像(右) */ .icon-img-right { right: 0; } /* アイコン画像 */", "+ list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return", "transparent; } /* 吹き出しの三角(右) */ .sb-txt-right:before { 
right: -7px; border-width: 7px 0 7px", "+ list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "= 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in frame and 'year' in frame and", "#再帰を使う return sn def uranai(input_text): global frame # 外部の状態を参照する if 'asking' in frame:", "!= '\\nB' and frame['type'] != '\\nC': # 占います del frame['type'] frame['asking'] = 'type'", "elif soul == 22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33:", "frame['manzoku'] == '\\nN' and len(TYPE) >= 3: return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!' return output_text def start():", "return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return", "'正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name' in frame and 'year' in frame and 'month' not in", "'day' in frame and 'type' in frame and 'manzoku' not in frame: if", "in frame and 'type' in frame and frame['manzoku'] == '\\nN' and len(TYPE) >=", "''')) display(IPython.display.HTML(''' <style> /* 全体 */ .sb-box { position: relative; overflow: hidden; }", "left: -7px; border-width: 7px 10px 7px 0; border-color: transparent #eee transparent transparent; }", "return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and 'month' in", "# 占います frame['asking'] = 'type' return 
'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year'", "soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global frame # 外部の状態を参照する if 'asking' in", "else: frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year' in", "position: absolute; overflow: hidden; top: 0; width: 80px; height: 80px; } /* アイコン画像(左)", "'None' def soulnumber(X): number = [''.join( x for x in X if x", "display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4)) display_bot(bot_text) output.register_callback('notebook.Convert', convert) if start is not", "in frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'): del frame['day'] frame['asking']", "frame: frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and", "#チャット用の関数(ここを書き換える) global n n += 1 return 'ほ' * n # アイコンの指定 BOT_ICON", "input_text del frame['asking'] if 'name' not in frame: frame['asking'] = 'name' # 名前をたずねる", "soul == 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking']", "== 'None'): del frame['year'] frame['asking'] = 'year' # 誕生年をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。' if 'name'", "return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and 'month' in", "del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame", "IPython from google.colab import output n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global", "= 'manzoku' return 
'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 9: frame['asking'] = 'manzoku' return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "icon-img-left\"> <img src=\"{bot_icon}\" width=\"60px\"> </div><!-- /.icon-img icon-img-left --> <div class=\"icon-name icon-name-left\">{bot_name}</div> <div class=\"sb-side", "3: frame['asking'] = 'manzoku' return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku'", "transparent; } .sb-txt-left:after { left: -10px; border-width: 8px 10px 8px 0; border-color: transparent", "if 'name' not in frame: frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?' if", "width: 60px; top: 62px; font-size: 9px; } /* 吹き出し(左) */ .sb-side-left { margin:", "/.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box --> ''')) def display_you(your_text):", "n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n += 1 return", "frame['type'] del frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' 
if 'name' in frame and", "'month' # 誕生月をたずねる return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame", "= list(map(int, result)) sn = sum(int(c) for c in number) if sn %", ".icon-name-left { left: 0; } /* アイコンネーム(右) */ .icon-name-right { right: 0; }", "import output n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n n +=", "in Match if x not in '\\n') pattern = r'\\d\\d' result = re.match(pattern,", "'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "style='width: 100%; background: #eee;'></textarea></div> ''')) def convert(your_text): display_you(your_text) bot_text = chat(your_text, **kw) time.sleep(random.randint(0,4))", "= 'type' return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。' if 'name' in frame and 'year' in frame and", "and 'month' in frame and 'day' in frame and 'type' not in frame:", "sum(int(c) for c in number) return len(number) def match(x): Match = list(x) Match", "return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "result)) sn = sum(int(c) for c in number) if sn % 11 ==", "def soulnumber(X): number = [''.join( x for x in X if x not", "*/ } /* 吹き出し内のテキスト */ .sb-txt { padding: 12px; /* 吹き出し内の上下左右の余白を-6px */ }", "'manzoku' return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "#number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('A') soul = 
soulnumber(list(frame['year']) + list(frame['month'])", "output.register_callback('notebook.Convert', convert) if start is not None: display_bot(start) # フレーム 状態をもつ辞書 # 'name',", "img { border-radius: 50%; border: 2px solid #eee; } /* アイコンネーム */ .icon-name", "= list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul = soulnumber(list(frame['year']) + list(frame['month']) +", ".icon-img { position: absolute; overflow: hidden; top: 0; width: 80px; height: 80px; }", "= 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "width: 80px; text-align: center; top: 83px; color: #fff; font-size: 10px; } /* アイコンネーム(左)", "'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku'", "sn def uranai(input_text): global frame # 外部の状態を参照する if 'asking' in frame: # asking", "overflow: hidden; top: 0; width: 80px; height: 80px; } /* アイコン画像(左) */ .icon-img-left", "position: absolute; width: 80px; text-align: center; top: 83px; color: #fff; font-size: 10px; }", "= 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in", "= 'manzoku' return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "del frame['day'] frame['asking'] = 'day' # 誕生日をたずねる return 
'正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame", "absolute; width: 80px; text-align: center; top: 83px; color: #fff; font-size: 10px; } /*", "'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with", "[]) m = re.compile('^[0-9]+$') result = [s for s in number if m.match(s)]", "= 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "= 'manzoku' return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking'] = 'manzoku' return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "transparent transparent #eee; } /* 767px(iPad)以下 */ @media (max-width: 767px) { .icon-img {", "78px; /* 吹き出し(左)の上下左右の余白を狭く */ } /* 吹き出し(右) */ .sb-side-right { margin: 0 78px", "6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 7: frame['asking'] = 'manzoku'", "frame['asking'] = 'manzoku' return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return", "== '\\nN' and len(TYPE) < 3: #TYPE.append(frame['type']) del frame['type'] del frame['manzoku'] frame['asking'] =", "{ left: 0; } /* アイコン画像(右) */ .icon-img-right { right: 0; } /*", "del frame['manzoku'] frame['asking'] = 'type' return 
'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in frame and 'year'", "and 'day' in frame and 'type' in frame and 'manzoku' not in frame:", "in frame and 'month' in frame and 'day' in frame and frame['type'] !=", "9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global frame # 外部の状態を参照する", "/.icon-img icon-img-right --> <div class=\"icon-name icon-name-right\">{your_name}</div> <div class=\"sb-side sb-side-right\"> <div class=\"sb-txt sb-txt-right\"> {your_text}", "'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame and (number(frame['month']) != 2", "'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year' not in frame: frame['asking']", "= 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw): def display_bot(bot_text): with output.redirect_to_element('#output'): bot_name", "return 'ほ' * n # アイコンの指定 BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png' YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png' def", "{ if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {}); inputPane.value='' } }); </script> <div", "return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 
'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking']", "output.redirect_to_element('#output'): bot_name = kw.get('bot_name', 'Master') bot_icon = kw.get('bot_icon', BOT_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\"> <div", "*/ .sb-txt-right:before { right: -7px; border-width: 7px 0 7px 10px; border-color: transparent transparent", "frame and 'month' in frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'):", "'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if frame['type'] == '\\nB': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B')", "sb-side-left\"> <div class=\"sb-txt sb-txt-left\"> {bot_text} </div><!-- /.sb-txt sb-txt-left --> </div><!-- /.sb-side sb-side-left -->", "in frame: frame['asking'] = 'name' # 名前をたずねる return 'あなたの名前は?' if frame['name'] == '\\n':", "> 9: #2桁の時は return soulnumber(str(sn)) #再帰を使う return sn def uranai(input_text): global frame #", "#fff; font-size: 10px; } /* アイコンネーム(左) */ .icon-name-left { left: 0; } /*", "list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "frame['asking'] = 'manzoku' return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 8: frame['asking'] = 'manzoku' return", "*/ .icon-name-left { left: 0; } /* アイコンネーム(右) */ .icon-name-right { right: 0;", "soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking']", "[s for s in number if m.match(s)] number = list(map(int, result)) sn =", "in frame and 'month' in frame and 'day' not in frame: frame['asking'] =", "right: 0; } /* アイコン画像 */ .icon-img img { border-radius: 50%; border: 2px", 
"'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] =", "\"\"; position: absolute; border-style: solid; top: 16px; z-index: 3; } .sb-txt:after { content:", "'day' not in frame: frame['asking'] = 'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name'", "soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2: frame['asking']", "'name' # 名前をたずねる return 'あなたの名前は?' if frame['name'] == '\\n': del frame['name'] frame['asking'] =", "del frame['type'] del frame['manzoku'] frame['asking'] = 'type' return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!' if 'name' in frame", "frame and 'year' in frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'):", "return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 11: frame['asking'] = 'manzoku' return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", ".sb-txt-right:after { right: -10px; border-width: 8px 0 8px 10px; border-color: transparent transparent transparent", "{ position: absolute; overflow: hidden; top: 0; width: 80px; height: 80px; } /*", "**kw): #チャット用の関数(ここを書き換える) global n n += 1 return 'ほ' * n # アイコンの指定", "*/ .icon-img-left { left: 0; } /* アイコン画像(右) */ .icon-img-right { right: 0;", "Match = list(x) Match = ''.join( x for x in Match if x", "match(x): Match = list(x) Match = ''.join( x for x in Match if", "== 33: frame['asking'] = 'manzoku' return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' else: frame['asking'] = 'manzoku' return 
'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", "} /* 吹き出し */ .sb-side { position: relative; float: left; margin: 0 105px", "{ position: relative; border: 2px solid #eee; border-radius: 6px; background: #eee; color: #333;", "sb-txt-left --> </div><!-- /.sb-side sb-side-left --> </div><!-- /.sb-box --> ''')) def display_you(your_text): with", "with output.redirect_to_element('#output'): your_name = kw.get('your_name', 'あなた') your_icon = kw.get('your_icon', YOUR_ICON) display(IPython.display.HTML(f''' <div class=\"sb-box\">", "(number(frame['year']) != 4 or match(frame['year']) == 'None'): del frame['year'] frame['asking'] = 'year' #", "var inputPane = document.getElementById('input'); inputPane.addEventListener('keydown', (e) => { if(e.keyCode == 13) { google.colab.kernel.invokeFunction('notebook.Convert',", "return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 6: frame['asking'] = 'manzoku' return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 3: frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 2:", "<reponame>heeeedgehog/chat2021 import re import time import random import IPython from google.colab import output", "== 'None'): del frame['month'] frame['asking'] = 'month' # 誕生月をたずねる return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。' if 'name'", "'day' # 誕生日をたずねる return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。' if 'name' in frame and 'year' in frame", "/* 吹き出しの三角 */ .sb-txt:before { content: \"\"; position: absolute; border-style: 
solid; top: 16px;", "'\\nY' and frame['manzoku'] != '\\nN': del frame['manzoku'] frame['asking'] = 'manzoku' return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。' if", "} /* 吹き出し(右) */ .sb-side-right { margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く", "frame['asking'] = 'manzoku' return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' if 'name' in frame and 'year' in frame", "+ list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "from google.colab import output n = 0 def chat(text, **kw): #チャット用の関数(ここを書き換える) global n", "if result == None: return 'None' def soulnumber(X): number = [''.join( x for", "return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 22: frame['asking'] = 'manzoku' return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul", "'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif", "TYPE.append('C') soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day'])) if soul == 1: frame['asking']", "= 'name' return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。' if 'name' in frame and 'year' not in frame:", "frame['asking'] = 'manzoku' return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 4: frame['asking'] = 'manzoku' return", "= 'manzoku' return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul == 5: frame['asking'] = 'manzoku' return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'", 
"list(frame['day'])) if soul == 1: frame['asking'] = 'manzoku' return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。' elif soul ==", "frame['type'] == '\\nB': #number = list(frame['year']) + list(frame['month']) + list(frame['day']) TYPE.append('B') soul =", "== '\\nN' and len(TYPE) >= 3: return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!' return output_text def start(): run_chat(chat=uranai)", ".sb-side-right { margin: 0 78px 30px 0; /* 吹き出し(右)の上下左右の余白を狭く */ } /* 吹き出し内のテキスト", "吹き出しの三角(左) */ .sb-txt-left:before { left: -7px; border-width: 7px 10px 7px 0; border-color: transparent", "frame and 'day' in frame and 'type' in frame and 'manzoku' not in", "/* アイコン画像 */ .icon-img { position: absolute; overflow: hidden; top: 0; width: 80px;" ]
[ "django.apps import AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name", "= settings.MEX_BRAND def ready(self): AdminSite.site_title = settings.MEX_BRAND AdminSite.site_header = settings.MEX_BRAND AdminSite.index_title = \"\"", "-*- coding: utf-8 -*- from django.conf import settings from django.apps import AppConfig from", "settings from django.apps import AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig): name =", "-*- from django.conf import settings from django.apps import AppConfig from django.contrib.admin import AdminSite", "from django.apps import AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig): name = \"mex\"", "# -*- coding: utf-8 -*- from django.conf import settings from django.apps import AppConfig", "import AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name =", "django.conf import settings from django.apps import AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig):", "\"mex\" verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title = settings.MEX_BRAND AdminSite.site_header = settings.MEX_BRAND AdminSite.index_title", "class MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title = settings.MEX_BRAND", "import AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title", "MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title = settings.MEX_BRAND AdminSite.site_header", "utf-8 -*- from django.conf import settings from django.apps import AppConfig from django.contrib.admin import", "AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title =", "= \"mex\" verbose_name = settings.MEX_BRAND def 
ready(self): AdminSite.site_title = settings.MEX_BRAND AdminSite.site_header = settings.MEX_BRAND", "coding: utf-8 -*- from django.conf import settings from django.apps import AppConfig from django.contrib.admin", "from django.contrib.admin import AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND def", "verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title = settings.MEX_BRAND AdminSite.site_header = settings.MEX_BRAND AdminSite.index_title =", "name = \"mex\" verbose_name = settings.MEX_BRAND def ready(self): AdminSite.site_title = settings.MEX_BRAND AdminSite.site_header =", "import settings from django.apps import AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig): name", "from django.conf import settings from django.apps import AppConfig from django.contrib.admin import AdminSite class", "django.contrib.admin import AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND def ready(self):", "AppConfig from django.contrib.admin import AdminSite class MexConfig(AppConfig): name = \"mex\" verbose_name = settings.MEX_BRAND" ]
[ "= 0 for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash,", "of each component \"\"\" logger.info('Shuting down') self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining()", "shutdown(self, signum, frame): \"\"\" Terminate threads of each component \"\"\" logger.info('Shuting down') self.stop.set()", "%.4fs', coin, cnt, time_total) return number + 1 def last_processed_block(self, database, coin): \"\"\"", "0 for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses)", "threading.Event() coins = [] threads = [] database = None notifier = None", ".notifier import Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the processing", "database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \", number) return number def worker(self, coin):", "thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of all components", "coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number) cnt = 0 for tx_hash in", "\"\"\" time_start = timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block:", "number <number> :param database: Database object :param coin: Coin object :param number: block", "notifier = None def __init__(self, config): \"\"\" Construct new Monitor object :param config:", "object :param coin: Coin object :param number: block number :return: number of the", "worker(self, coin): \"\"\" Process new blocks of cryptocurrency <coin> until stop event is", "frame): \"\"\" Terminate threads of each component \"\"\" logger.info('Shuting down') self.stop.set() for thread", "block_hash) logger.info('%s: setting %s as last 
processed block', coin, number) def process_block(self, database,", "for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt", "coin.get_block_hash(number) if hash_saved == hash_node or hash_saved is None: break database.delete_block(coin, number) number", "while not self.stop.is_set(): current_number = self.last_processed_block(database, coin) + 1 last_number, _ = coin.get_last_block_number()", "processing of cryptocurrencies blocks \"\"\" import threading import logging from timeit import default_timer", "- time_start logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total) return number", "= Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for coin in config['coins']: coin_inst =", "import BTC, BCH, DASH, ZEC, LTC, ETH from .database import Database from .notifier", "Get the last block procesesd of <coin> :param database: Database object :param coin:", "only brief time (5% of block time) before trying again self.stop.wait(timeout=until_next_block) logger.info('%s: terminating',", "Database from .notifier import Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls", "coin): \"\"\" Process new blocks of cryptocurrency <coin> until stop event is set.", "in self.coins: if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start", "except InterruptedError: break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if until_next_block <", "if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for", "is set. 
:param coin: a class inherited from Coin \"\"\" database = Database(self.config['db'],", "as timer from datetime import datetime, timedelta from .coin import BTC, BCH, DASH,", "the current block of each coin as the last processed \"\"\" for coin", "controls the processing of cryptocurrencies bloks \"\"\" stop = threading.Event() coins = []", "= None def __init__(self, config): \"\"\" Construct new Monitor object :param config: configuration", "config: configuration dict \"\"\" self.config = config self.database = Database(config['db'], self.config) self.notifier =", "= coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\" Terminate", "\"\"\" Test connectivity of all components \"\"\" self.notifier.test_connection() for coin in self.coins: if", "block_id, tx_hash, addresses) cnt += 1 time_total = timer() - time_start logger.debug('%s: processed", "from .coin import BTC, BCH, DASH, ZEC, LTC, ETH from .database import Database", ".coin import BTC, BCH, DASH, ZEC, LTC, ETH from .database import Database from", "tx_hash, addresses) cnt += 1 time_total = timer() - time_start logger.debug('%s: processed %d", "database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number) cnt = 0 for", "test_connection(self): \"\"\" Test connectivity of all components \"\"\" self.notifier.test_connection() for coin in self.coins:", "threads = [] database = None notifier = None def __init__(self, config): \"\"\"", "coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\" Terminate threads", "# wait only brief time (5% of block time) before trying again self.stop.wait(timeout=until_next_block)", "database, coin): \"\"\" Get the last block procesesd of <coin> :param database: Database", "next block \"\"\" 
time_start = timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s:", "\"\"\" for coin in self.coins: logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,))", "last processed block \"\"\" number = database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, number)", "config self.database = Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for coin in config['coins']:", "database: Database object :param coin: Coin object :param number: block number :return: number", "processing block: %s', coin, number) cnt = 0 for tx_hash in coin.get_block_transactions(): addresses", "+= 1 time_total = timer() - time_start logger.debug('%s: processed %d transactions in %.4fs',", "= database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if hash_saved == hash_node or hash_saved is", "= coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt += 1 time_total = timer()", "< 0: # should be already generated until_next_block = (coin.get_block_time() * 0.05).total_seconds() #", "of last processed block \"\"\" number = database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin,", "timeit import default_timer as timer from datetime import datetime, timedelta from .coin import", "def __init__(self, config): \"\"\" Construct new Monitor object :param config: configuration dict \"\"\"", "start(self): \"\"\" Start thread for every coin and notifier \"\"\" for coin in", "signum, frame): \"\"\" Terminate threads of each component \"\"\" logger.info('Shuting down') self.stop.set() for", "DASH, ZEC, LTC, ETH from .database import Database from .notifier import Notifier logger", "None def __init__(self, config): \"\"\" Construct new Monitor object :param config: configuration dict", "args=(self.stop,)) 
self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the current block of each coin", "in self.coins: logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread", "threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the current block of each", "Notifier(config, self.database) for coin in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id']", "= coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number: if self.stop.is_set(): break try: current_number", "while current_number <= last_number: if self.stop.is_set(): break try: current_number = self.process_block(database, coin, current_number)", "Construct new Monitor object :param config: configuration dict \"\"\" self.config = config self.database", "unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for every coin and notifier \"\"\" for", "number of last processed block \"\"\" number = database.get_last_block_number(coin) while True: hash_saved =", "return number def worker(self, coin): \"\"\" Process new blocks of cryptocurrency <coin> until", "number = database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if", "addresses) cnt += 1 time_total = timer() - time_start logger.debug('%s: processed %d transactions", "Monitored designated for processing of cryptocurrencies blocks \"\"\" import threading import logging from", "thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def", "Database object :param 
coin: Coin object :return: number of last processed block \"\"\"", "connectivity of all components \"\"\" self.notifier.test_connection() for coin in self.coins: if not coin.test_connection():", "last block procesesd of <coin> :param database: Database object :param coin: Coin object", "brief time (5% of block time) before trying again self.stop.wait(timeout=until_next_block) logger.info('%s: terminating', coin)", "break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if until_next_block < 0: #", "time_total) return number + 1 def last_processed_block(self, database, coin): \"\"\" Get the last", "for coin in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def", "# should be already generated until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only", "\"\"\" Process new blocks of cryptocurrency <coin> until stop event is set. :param", "as last processed block', coin, number) def process_block(self, database, coin, number): \"\"\" Process", "object :param config: configuration dict \"\"\" self.config = config self.database = Database(config['db'], self.config)", "Process transaction of <coin> in a block of number <number> :param database: Database", "last processed block', coin, number) def process_block(self, database, coin, number): \"\"\" Process transaction", "cryptocurrency <coin> until stop event is set. 
:param coin: a class inherited from", "self.config) while not self.stop.is_set(): current_number = self.last_processed_block(database, coin) + 1 last_number, _ =", "logger.info('%s: setting %s as last processed block', coin, number) def process_block(self, database, coin,", "generated until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5% of", "[] database = None notifier = None def __init__(self, config): \"\"\" Construct new", "block of each coin as the last processed \"\"\" for coin in self.coins:", "database = None notifier = None def __init__(self, config): \"\"\" Construct new Monitor", "coin) + 1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number:", "coin, cnt, time_total) return number + 1 def last_processed_block(self, database, coin): \"\"\" Get", "\"\"\" self.config = config self.database = Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for", "processed block \"\"\" number = database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, number) hash_node", "None notifier = None def __init__(self, config): \"\"\" Construct new Monitor object :param", "event is set. 
:param coin: a class inherited from Coin \"\"\" database =", "= self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\" Terminate threads of each component", "self.config = config self.database = Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for coin", "number) number -= 1 #print(\"last_processed_block> \", number) return number def worker(self, coin): \"\"\"", "of the next block \"\"\" time_start = timer() coin.get_block(number) block_id = database.insert_block(coin, number,", "coin in self.coins: if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\"", "all components \"\"\" self.notifier.test_connection() for coin in self.coins: if not coin.test_connection(): raise ConnectionError('{}:", "is None: break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \", number) return number", "inherited from Coin \"\"\" database = Database(self.config['db'], self.config) while not self.stop.is_set(): current_number =", "= timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin,", "def test_connection(self): \"\"\" Test connectivity of all components \"\"\" self.notifier.test_connection() for coin in", "self.last_processed_block(database, coin) + 1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while current_number <=", "\"\"\" Construct new Monitor object :param config: configuration dict \"\"\" self.config = config", "database, coin, number): \"\"\" Process transaction of <coin> in a block of number", "\"\"\" Monitor controls the processing of cryptocurrencies bloks \"\"\" stop = threading.Event() coins", "of cryptocurrencies bloks \"\"\" stop = threading.Event() coins = [] threads = []", "\"\"\" import threading import logging from 
timeit import default_timer as timer from datetime", "logger.info('%s: processing block: %s', coin, number) cnt = 0 for tx_hash in coin.get_block_transactions():", "current block of each coin as the last processed \"\"\" for coin in", "block', coin, number) def process_block(self, database, coin, number): \"\"\" Process transaction of <coin>", "def worker(self, coin): \"\"\" Process new blocks of cryptocurrency <coin> until stop event", "Database object :param coin: Coin object :param number: block number :return: number of", "for processing of cryptocurrencies blocks \"\"\" import threading import logging from timeit import", "Start thread for every coin and notifier \"\"\" for coin in self.coins: logger.info('%s:", "node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for every coin and notifier \"\"\"", "import threading import logging from timeit import default_timer as timer from datetime import", "<coin> until stop event is set. :param coin: a class inherited from Coin", "of all components \"\"\" self.notifier.test_connection() for coin in self.coins: if not coin.test_connection(): raise", "number: block number :return: number of the next block \"\"\" time_start = timer()", "Set the current block of each coin as the last processed \"\"\" for", "hash_saved == hash_node or hash_saved is None: break database.delete_block(coin, number) number -= 1", "of <coin> :param database: Database object :param coin: Coin object :return: number of", "if until_next_block < 0: # should be already generated until_next_block = (coin.get_block_time() *", "+ 1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number: if", "coin, current_number) except InterruptedError: break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if", "= database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, 
number) hash_node = coin.get_block_hash(number) if hash_saved", "(coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if until_next_block < 0: # should be already", "raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for every coin and", "until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if until_next_block < 0: # should", "in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt += 1", "\"\"\" database = Database(self.config['db'], self.config) while not self.stop.is_set(): current_number = self.last_processed_block(database, coin) +", "wait only brief time (5% of block time) before trying again self.stop.wait(timeout=until_next_block) logger.info('%s:", "= [] database = None notifier = None def __init__(self, config): \"\"\" Construct", "self.config) self.notifier = Notifier(config, self.database) for coin in config['coins']: coin_inst = coin(config, self.stop)", "\"\"\" number = database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number)", "as the last processed \"\"\" for coin in self.coins: number, block_hash = coin.get_last_block_number()", "from Coin \"\"\" database = Database(self.config['db'], self.config) while not self.stop.is_set(): current_number = self.last_processed_block(database,", "of each coin as the last processed \"\"\" for coin in self.coins: number,", "in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum,", "= [] threads = [] database = None notifier = None def __init__(self,", "a block of number <number> :param database: Database object :param coin: Coin object", "database = 
Database(self.config['db'], self.config) while not self.stop.is_set(): current_number = self.last_processed_block(database, coin) + 1", "self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt += 1 time_total = timer() - time_start", "datetime, timedelta from .coin import BTC, BCH, DASH, ZEC, LTC, ETH from .database", "time_start = timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s',", "hash_saved is None: break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \", number) return", "set_last_blocks(self): \"\"\" Set the current block of each coin as the last processed", "from .notifier import Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the", "database: Database object :param coin: Coin object :return: number of last processed block", "current_number) except InterruptedError: break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if until_next_block", "until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5% of block", "%s', coin, number) cnt = 0 for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash)", "number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as last processed", "of cryptocurrency <coin> until stop event is set. 
:param coin: a class inherited", "BCH, DASH, ZEC, LTC, ETH from .database import Database from .notifier import Notifier", "= threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self):", "the next block \"\"\" time_start = timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash())", "class Monitor(): \"\"\" Monitor controls the processing of cryptocurrencies bloks \"\"\" stop =", "block number :return: number of the next block \"\"\" time_start = timer() coin.get_block(number)", "self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of", "True: hash_saved = database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if hash_saved == hash_node or", "coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for every coin", "thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the current block", "self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as last processed block', coin, number) def", "from .database import Database from .notifier import Notifier logger = logging.getLogger(__name__) class Monitor():", "started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread)", "class inherited from Coin \"\"\" database = Database(self.config['db'], self.config) while not self.stop.is_set(): current_number", "number :return: number of the next block \"\"\" time_start = timer() 
coin.get_block(number) block_id", "coin and notifier \"\"\" for coin in self.coins: logger.info('%s: monitoring started', coin) thread", "number + 1 def last_processed_block(self, database, coin): \"\"\" Get the last block procesesd", "block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as last processed block',", "last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number: if self.stop.is_set(): break", "monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,))", "coin.get_block_time() - datetime.now()).total_seconds() if until_next_block < 0: # should be already generated until_next_block", "Coin object :param number: block number :return: number of the next block \"\"\"", "Monitor(): \"\"\" Monitor controls the processing of cryptocurrencies bloks \"\"\" stop = threading.Event()", "import datetime, timedelta from .coin import BTC, BCH, DASH, ZEC, LTC, ETH from", "def process_block(self, database, coin, number): \"\"\" Process transaction of <coin> in a block", "+ coin.get_block_time() - datetime.now()).total_seconds() if until_next_block < 0: # should be already generated", "cryptocurrencies blocks \"\"\" import threading import logging from timeit import default_timer as timer", "LTC, ETH from .database import Database from .notifier import Notifier logger = logging.getLogger(__name__)", "module specifies class Monitored designated for processing of cryptocurrencies blocks \"\"\" import threading", "coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as last processed block', coin, number)", "import default_timer as timer from datetime import datetime, timedelta from .coin import BTC,", "already generated until_next_block = 
(coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5%", "number) cnt = 0 for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number,", "time_total = timer() - time_start logger.debug('%s: processed %d transactions in %.4fs', coin, cnt,", "This module specifies class Monitored designated for processing of cryptocurrencies blocks \"\"\" import", "not self.stop.is_set(): current_number = self.last_processed_block(database, coin) + 1 last_number, _ = coin.get_last_block_number() #print(current_number,", "block: %s', coin, number) cnt = 0 for tx_hash in coin.get_block_transactions(): addresses =", "transaction of <coin> in a block of number <number> :param database: Database object", "coin, number) def process_block(self, database, coin, number): \"\"\" Process transaction of <coin> in", "thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of all components \"\"\" self.notifier.test_connection() for", "ZEC, LTC, ETH from .database import Database from .notifier import Notifier logger =", "in %.4fs', coin, cnt, time_total) return number + 1 def last_processed_block(self, database, coin):", "class Monitored designated for processing of cryptocurrencies blocks \"\"\" import threading import logging", "+ 1 def last_processed_block(self, database, coin): \"\"\" Get the last block procesesd of", "\"\"\" Start thread for every coin and notifier \"\"\" for coin in self.coins:", "ETH from .database import Database from .notifier import Notifier logger = logging.getLogger(__name__) class", "<coin> :param database: Database object :param coin: Coin object :return: number of last", "self.coins: if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread", "the processing of cryptocurrencies bloks \"\"\" stop = threading.Event() coins = [] threads", "threads 
of each component \"\"\" logger.info('Shuting down') self.stop.set() for thread in self.threads: thread.join()", "self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the current block of each coin as", "database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if hash_saved ==", "last processed \"\"\" for coin in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number,", "dict \"\"\" self.config = config self.database = Database(config['db'], self.config) self.notifier = Notifier(config, self.database)", "last_number) while current_number <= last_number: if self.stop.is_set(): break try: current_number = self.process_block(database, coin,", "\"\"\" self.notifier.test_connection() for coin in self.coins: if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__))", "== hash_node or hash_saved is None: break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block>", "current_number = self.last_processed_block(database, coin) + 1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while", "coin): \"\"\" Get the last block procesesd of <coin> :param database: Database object", "logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the processing of cryptocurrencies bloks", "self.database = Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for coin in config['coins']: coin_inst", "coin: a class inherited from Coin \"\"\" database = Database(self.config['db'], self.config) while not", "for thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of all", "configuration dict \"\"\" self.config = config self.database = Database(config['db'], self.config) self.notifier = Notifier(config,", "in a 
block of number <number> :param database: Database object :param coin: Coin", "Process new blocks of cryptocurrency <coin> until stop event is set. :param coin:", "#print(\"last_processed_block> \", number) return number def worker(self, coin): \"\"\" Process new blocks of", "and notifier \"\"\" for coin in self.coins: logger.info('%s: monitoring started', coin) thread =", "for coin in self.coins: logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread)", "#!/usr/bin/env python3 \"\"\" This module specifies class Monitored designated for processing of cryptocurrencies", "BTC, BCH, DASH, ZEC, LTC, ETH from .database import Database from .notifier import", "self.process_block(database, coin, current_number) except InterruptedError: break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds()", "Test connectivity of all components \"\"\" self.notifier.test_connection() for coin in self.coins: if not", "_ = coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number: if self.stop.is_set(): break try:", "self.coins: logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread =", "args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set", "= Database(self.config['db'], self.config) while not self.stop.is_set(): current_number = self.last_processed_block(database, coin) + 1 last_number,", "processed %d transactions in %.4fs', coin, cnt, time_total) return number + 1 def", "number) hash_node = coin.get_block_hash(number) if hash_saved == hash_node or hash_saved is None: break", "Database(self.config['db'], self.config) while not self.stop.is_set(): 
current_number = self.last_processed_block(database, coin) + 1 last_number, _", "datetime.now()).total_seconds() if until_next_block < 0: # should be already generated until_next_block = (coin.get_block_time()", "* 0.05).total_seconds() # wait only brief time (5% of block time) before trying", "block procesesd of <coin> :param database: Database object :param coin: Coin object :return:", "= database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number) cnt = 0", "= threading.Event() coins = [] threads = [] database = None notifier =", "<coin> in a block of number <number> :param database: Database object :param coin:", ":return: number of the next block \"\"\" time_start = timer() coin.get_block(number) block_id =", "cryptocurrencies bloks \"\"\" stop = threading.Event() coins = [] threads = [] database", "import Database from .notifier import Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor", "timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number)", "import Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the processing of", "procesesd of <coin> :param database: Database object :param coin: Coin object :return: number", "Monitor controls the processing of cryptocurrencies bloks \"\"\" stop = threading.Event() coins =", "designated for processing of cryptocurrencies blocks \"\"\" import threading import logging from timeit", "coins = [] threads = [] database = None notifier = None def", "from datetime import datetime, timedelta from .coin import BTC, BCH, DASH, ZEC, LTC,", "<number> :param database: Database object :param coin: Coin object :param number: block number", "number of the next block \"\"\" time_start = timer() coin.get_block(number) block_id = database.insert_block(coin,", "of number <number> :param database: Database object :param coin: 
Coin object :param number:", "return number + 1 def last_processed_block(self, database, coin): \"\"\" Get the last block", "self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of all components \"\"\" self.notifier.test_connection() for coin", "new blocks of cryptocurrency <coin> until stop event is set. :param coin: a", "\"\"\" Process transaction of <coin> in a block of number <number> :param database:", "Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the processing of cryptocurrencies", "current_number <= last_number: if self.stop.is_set(): break try: current_number = self.process_block(database, coin, current_number) except", "time_start logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total) return number +", "object :param coin: Coin object :return: number of last processed block \"\"\" number", "number) return number def worker(self, coin): \"\"\" Process new blocks of cryptocurrency <coin>", "stop = threading.Event() coins = [] threads = [] database = None notifier", "for coin in self.coins: if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self):", "\"\"\" Get the last block procesesd of <coin> :param database: Database object :param", "processed block', coin, number) def process_block(self, database, coin, number): \"\"\" Process transaction of", "config): \"\"\" Construct new Monitor object :param config: configuration dict \"\"\" self.config =", "be already generated until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only brief time", "block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number) cnt =", "def last_processed_block(self, database, coin): \"\"\" Get the last block procesesd of <coin> :param", "= coin.get_block_hash(number) if hash_saved == hash_node or hash_saved is None: break 
database.delete_block(coin, number)", "coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\"", "each coin as the last processed \"\"\" for coin in self.coins: number, block_hash", "threading import logging from timeit import default_timer as timer from datetime import datetime,", "\"\"\" This module specifies class Monitored designated for processing of cryptocurrencies blocks \"\"\"", ":param database: Database object :param coin: Coin object :param number: block number :return:", "until stop event is set. :param coin: a class inherited from Coin \"\"\"", "1 time_total = timer() - time_start logger.debug('%s: processed %d transactions in %.4fs', coin,", "\"\"\" stop = threading.Event() coins = [] threads = [] database = None", ".database import Database from .notifier import Notifier logger = logging.getLogger(__name__) class Monitor(): \"\"\"", "= self.last_processed_block(database, coin) + 1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while current_number", "\"\"\" Set the current block of each coin as the last processed \"\"\"", "self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of all components \"\"\" self.notifier.test_connection()", "tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt +=", "None: break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \", number) return number def", "<= last_number: if self.stop.is_set(): break try: current_number = self.process_block(database, coin, current_number) except InterruptedError:", "def shutdown(self, signum, frame): \"\"\" Terminate threads of each component \"\"\" logger.info('Shuting down')", "thread.start() def set_last_blocks(self): \"\"\" 
Set the current block of each coin as the", "0.05).total_seconds() # wait only brief time (5% of block time) before trying again", "__init__(self, config): \"\"\" Construct new Monitor object :param config: configuration dict \"\"\" self.config", "number): \"\"\" Process transaction of <coin> in a block of number <number> :param", "last_number: if self.stop.is_set(): break try: current_number = self.process_block(database, coin, current_number) except InterruptedError: break", "\"\"\" Terminate threads of each component \"\"\" logger.info('Shuting down') self.stop.set() for thread in", "def set_last_blocks(self): \"\"\" Set the current block of each coin as the last", "stop event is set. :param coin: a class inherited from Coin \"\"\" database", "not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for every", "\", number) return number def worker(self, coin): \"\"\" Process new blocks of cryptocurrency", "coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt += 1 time_total", "self.database) for coin in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst)", "of <coin> in a block of number <number> :param database: Database object :param", "new Monitor object :param config: configuration dict \"\"\" self.config = config self.database =", "the last block procesesd of <coin> :param database: Database object :param coin: Coin", "[] threads = [] database = None notifier = None def __init__(self, config):", "for every coin and notifier \"\"\" for coin in self.coins: logger.info('%s: monitoring started',", "coin, number) cnt = 0 for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin,", "hash_node = 
coin.get_block_hash(number) if hash_saved == hash_node or hash_saved is None: break database.delete_block(coin,", "self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\" Terminate threads of each component \"\"\"", "Monitor object :param config: configuration dict \"\"\" self.config = config self.database = Database(config['db'],", "specifies class Monitored designated for processing of cryptocurrencies blocks \"\"\" import threading import", "(coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5% of block time) before", "of cryptocurrencies blocks \"\"\" import threading import logging from timeit import default_timer as", "coin in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self,", "timer from datetime import datetime, timedelta from .coin import BTC, BCH, DASH, ZEC,", "bloks \"\"\" stop = threading.Event() coins = [] threads = [] database =", ":param coin: Coin object :param number: block number :return: number of the next", ":param database: Database object :param coin: Coin object :return: number of last processed", "logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the processing of cryptocurrencies bloks \"\"\" stop", "from timeit import default_timer as timer from datetime import datetime, timedelta from .coin", "or hash_saved is None: break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \", number)", "down') self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity", "thread for every coin and notifier \"\"\" for coin in self.coins: logger.info('%s: monitoring", "threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = 
threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\"", "\"\"\" logger.info('Shuting down') self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\"", "notifier \"\"\" for coin in self.coins: logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker,", "number def worker(self, coin): \"\"\" Process new blocks of cryptocurrency <coin> until stop", "number, block_id, tx_hash, addresses) cnt += 1 time_total = timer() - time_start logger.debug('%s:", "self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the", "while True: hash_saved = database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if hash_saved == hash_node", "blocks \"\"\" import threading import logging from timeit import default_timer as timer from", "\"\"\" for coin in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s:", "self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as last", "if self.stop.is_set(): break try: current_number = self.process_block(database, coin, current_number) except InterruptedError: break until_next_block", "self.stop.is_set(): break try: current_number = self.process_block(database, coin, current_number) except InterruptedError: break until_next_block =", "processing of cryptocurrencies bloks \"\"\" stop = threading.Event() coins = [] threads =", "self.notifier.test_connection() for coin in self.coins: if not coin.test_connection(): raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def", "self.coins.append(coin_inst) def shutdown(self, 
signum, frame): \"\"\" Terminate threads of each component \"\"\" logger.info('Shuting", ":param coin: a class inherited from Coin \"\"\" database = Database(self.config['db'], self.config) while", "= (coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5% of block time)", "hash_saved = database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if hash_saved == hash_node or hash_saved", "coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number) cnt", "coin in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s", "cnt, time_total) return number + 1 def last_processed_block(self, database, coin): \"\"\" Get the", "coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start()", "number, block_hash) logger.info('%s: setting %s as last processed block', coin, number) def process_block(self,", "config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame):", "timer() - time_start logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total) return", "break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \", number) return number def worker(self,", "object :param number: block number :return: number of the next block \"\"\" time_start", ":param config: configuration dict \"\"\" self.config = config self.database = Database(config['db'], self.config) self.notifier", "current_number = self.process_block(database, coin, current_number) except InterruptedError: break until_next_block = (coin.get_block_creation_time() + 
coin.get_block_time()", "number -= 1 #print(\"last_processed_block> \", number) return number def worker(self, coin): \"\"\" Process", "should be already generated until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only brief", "each component \"\"\" logger.info('Shuting down') self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining() def", "every coin and notifier \"\"\" for coin in self.coins: logger.info('%s: monitoring started', coin)", "thread.start() thread = threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the current", "self.stop.is_set(): current_number = self.last_processed_block(database, coin) + 1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number)", "0: # should be already generated until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait", "1 def last_processed_block(self, database, coin): \"\"\" Get the last block procesesd of <coin>", "number, coin.get_block_hash()) logger.info('%s: processing block: %s', coin, number) cnt = 0 for tx_hash", "a class inherited from Coin \"\"\" database = Database(self.config['db'], self.config) while not self.stop.is_set():", "transactions in %.4fs', coin, cnt, time_total) return number + 1 def last_processed_block(self, database,", "component \"\"\" logger.info('Shuting down') self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self):", "coin, number): \"\"\" Process transaction of <coin> in a block of number <number>", "timedelta from .coin import BTC, BCH, DASH, ZEC, LTC, ETH from .database import", "coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt += 1 time_total = timer() -", "try: current_number = self.process_block(database, coin, current_number) except InterruptedError: break until_next_block = 
(coin.get_block_creation_time() +", "until_next_block < 0: # should be already generated until_next_block = (coin.get_block_time() * 0.05).total_seconds()", "Coin \"\"\" database = Database(self.config['db'], self.config) while not self.stop.is_set(): current_number = self.last_processed_block(database, coin)", "-= 1 #print(\"last_processed_block> \", number) return number def worker(self, coin): \"\"\" Process new", "coin: Coin object :return: number of last processed block \"\"\" number = database.get_last_block_number(coin)", "self.stop) coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\" Terminate threads of", "logging from timeit import default_timer as timer from datetime import datetime, timedelta from", "block of number <number> :param database: Database object :param coin: Coin object :param", "if hash_saved == hash_node or hash_saved is None: break database.delete_block(coin, number) number -=", "coin as the last processed \"\"\" for coin in self.coins: number, block_hash =", "%d transactions in %.4fs', coin, cnt, time_total) return number + 1 def last_processed_block(self,", "1 last_number, _ = coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number: if self.stop.is_set():", "number) def process_block(self, database, coin, number): \"\"\" Process transaction of <coin> in a", "cnt = 0 for tx_hash in coin.get_block_transactions(): addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id,", "addresses = coin.get_transaction_io(tx_hash) self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses) cnt += 1 time_total =", "Terminate threads of each component \"\"\" logger.info('Shuting down') self.stop.set() for thread in self.threads:", "Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for coin in config['coins']: coin_inst = 
coin(config,", "object :return: number of last processed block \"\"\" number = database.get_last_block_number(coin) while True:", "= self.process_block(database, coin, current_number) except InterruptedError: break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() -", ":param coin: Coin object :return: number of last processed block \"\"\" number =", "= timer() - time_start logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total)", "database.get_block_hash(coin, number) hash_node = coin.get_block_hash(number) if hash_saved == hash_node or hash_saved is None:", "%s as last processed block', coin, number) def process_block(self, database, coin, number): \"\"\"", "= logging.getLogger(__name__) class Monitor(): \"\"\" Monitor controls the processing of cryptocurrencies bloks \"\"\"", "= config self.database = Database(config['db'], self.config) self.notifier = Notifier(config, self.database) for coin in", "cnt += 1 time_total = timer() - time_start logger.debug('%s: processed %d transactions in", "coin.get_last_block_number() #print(current_number, last_number) while current_number <= last_number: if self.stop.is_set(): break try: current_number =", "logger.info('Shuting down') self.stop.set() for thread in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test", "import logging from timeit import default_timer as timer from datetime import datetime, timedelta", "blocks of cryptocurrency <coin> until stop event is set. 
:param coin: a class", "= threading.Thread(target=self.notifier.worker, args=(self.stop,)) self.threads.append(thread) thread.start() def set_last_blocks(self): \"\"\" Set the current block of", "- datetime.now()).total_seconds() if until_next_block < 0: # should be already generated until_next_block =", "#print(current_number, last_number) while current_number <= last_number: if self.stop.is_set(): break try: current_number = self.process_block(database,", "= None notifier = None def __init__(self, config): \"\"\" Construct new Monitor object", "last_processed_block(self, database, coin): \"\"\" Get the last block procesesd of <coin> :param database:", ":return: number of last processed block \"\"\" number = database.get_last_block_number(coin) while True: hash_saved", "InterruptedError: break until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if until_next_block < 0:", "process_block(self, database, coin, number): \"\"\" Process transaction of <coin> in a block of", "block \"\"\" time_start = timer() coin.get_block(number) block_id = database.insert_block(coin, number, coin.get_block_hash()) logger.info('%s: processing", "coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id'] self.coins.append(coin_inst) def shutdown(self, signum, frame): \"\"\" Terminate threads of each", "datetime import datetime, timedelta from .coin import BTC, BCH, DASH, ZEC, LTC, ETH", "= Notifier(config, self.database) for coin in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id =", "coin: Coin object :param number: block number :return: number of the next block", "logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total) return number + 1", "set. 
:param coin: a class inherited from Coin \"\"\" database = Database(self.config['db'], self.config)", "in self.threads: thread.join() self.notifier.process_remaining() def test_connection(self): \"\"\" Test connectivity of all components \"\"\"", "setting %s as last processed block', coin, number) def process_block(self, database, coin, number):", "1 #print(\"last_processed_block> \", number) return number def worker(self, coin): \"\"\" Process new blocks", "coin in self.coins: logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start()", "break try: current_number = self.process_block(database, coin, current_number) except InterruptedError: break until_next_block = (coin.get_block_creation_time()", "self.notifier = Notifier(config, self.database) for coin in config['coins']: coin_inst = coin(config, self.stop) coin_inst.db_id", "= coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as last processed block', coin,", "for coin in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting", ":param number: block number :return: number of the next block \"\"\" time_start =", "logger.info('%s: monitoring started', coin) thread = threading.Thread(target=self.worker, args=(coin,)) self.threads.append(thread) thread.start() thread = threading.Thread(target=self.notifier.worker,", "default_timer as timer from datetime import datetime, timedelta from .coin import BTC, BCH,", "the last processed \"\"\" for coin in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin,", "in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash) logger.info('%s: setting %s as", "= (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds() if 
until_next_block < 0: # should be", "def start(self): \"\"\" Start thread for every coin and notifier \"\"\" for coin", "ConnectionError('{}: node unreachable'.format(coin.__class__.__name__)) def start(self): \"\"\" Start thread for every coin and notifier", "block \"\"\" number = database.get_last_block_number(coin) while True: hash_saved = database.get_block_hash(coin, number) hash_node =", "components \"\"\" self.notifier.test_connection() for coin in self.coins: if not coin.test_connection(): raise ConnectionError('{}: node", "python3 \"\"\" This module specifies class Monitored designated for processing of cryptocurrencies blocks", "hash_node or hash_saved is None: break database.delete_block(coin, number) number -= 1 #print(\"last_processed_block> \",", "Coin object :return: number of last processed block \"\"\" number = database.get_last_block_number(coin) while", "processed \"\"\" for coin in self.coins: number, block_hash = coin.get_last_block_number() self.database.insert_block(coin, number, block_hash)" ]
[ "OpenCV Stream sender example This example is made to be run from one", "for connections from receivers. \"\"\" import cv2 import queue from peerpy import Peer,", "True: ret, frame = cam.read() if not ret: print(\"Failed grabbing camera frame\") break", "without installing the package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream", "peerpy import Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True:", "import sys import os # allow the example to be run without installing", "made to be run from one python shell, waiting for connections from receivers.", "cam.read() if not ret: print(\"Failed grabbing camera frame\") break k = cv2.waitKey(1) if", "queue from peerpy import Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer:", "= cv2.waitKey(1) if k % 256 == 27: print(\"Escape hit, closing...\") break peer.broadcast(frame)", "if not ret: print(\"Failed grabbing camera frame\") break k = cv2.waitKey(1) if k", "package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This", "= cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True: ret, frame = cam.read() if", "frame = cam.read() if not ret: print(\"Failed grabbing camera frame\") break k =", "import cv2 import queue from peerpy import Peer, protocol cam = cv2.VideoCapture(0) with", "sys import os # allow the example to be run without installing the", "import Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True: ret,", "from peerpy import Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer: while", "cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True: ret, frame = cam.read() if not", "protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True: ret, frame =", "= cam.read() if not ret: print(\"Failed grabbing camera frame\") break k = cv2.waitKey(1)", "be 
run without installing the package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\"", "break k = cv2.waitKey(1) if k % 256 == 27: print(\"Escape hit, closing...\")", "allow the example to be run without installing the package, from this repository's", "from one python shell, waiting for connections from receivers. \"\"\" import cv2 import", "os # allow the example to be run without installing the package, from", "sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This example is made to be run", "to be run from one python shell, waiting for connections from receivers. \"\"\"", "not ret: print(\"Failed grabbing camera frame\") break k = cv2.waitKey(1) if k %", "directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This example is made to be", "the example to be run without installing the package, from this repository's root", "cv2.waitKey(1) if k % 256 == 27: print(\"Escape hit, closing...\") break peer.broadcast(frame) #cv2.imshow(\"Webcam\",", "installing the package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender", "the package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example", "receivers. 
\"\"\" import cv2 import queue from peerpy import Peer, protocol cam =", "cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True: ret, frame = cam.read()", "This example is made to be run from one python shell, waiting for", "\"\"\" import cv2 import queue from peerpy import Peer, protocol cam = cv2.VideoCapture(0)", "<filename>examples/opencv_stream/sender.py import sys import os # allow the example to be run without", "ret, frame = cam.read() if not ret: print(\"Failed grabbing camera frame\") break k", "as peer: while True: ret, frame = cam.read() if not ret: print(\"Failed grabbing", "peer: while True: ret, frame = cam.read() if not ret: print(\"Failed grabbing camera", "Stream sender example This example is made to be run from one python", "example This example is made to be run from one python shell, waiting", "from receivers. \"\"\" import cv2 import queue from peerpy import Peer, protocol cam", "k = cv2.waitKey(1) if k % 256 == 27: print(\"Escape hit, closing...\") break", "k % 256 == 27: print(\"Escape hit, closing...\") break peer.broadcast(frame) #cv2.imshow(\"Webcam\", frame) cam.release()", "\"\"\" OpenCV Stream sender example This example is made to be run from", "is made to be run from one python shell, waiting for connections from", "shell, waiting for connections from receivers. \"\"\" import cv2 import queue from peerpy", "cv2 import queue from peerpy import Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1)", "# allow the example to be run without installing the package, from this", "one python shell, waiting for connections from receivers. \"\"\" import cv2 import queue", "example to be run without installing the package, from this repository's root directory", "example is made to be run from one python shell, waiting for connections", "run from one python shell, waiting for connections from receivers. 
\"\"\" import cv2", "ret: print(\"Failed grabbing camera frame\") break k = cv2.waitKey(1) if k % 256", "frame\") break k = cv2.waitKey(1) if k % 256 == 27: print(\"Escape hit,", "repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This example is made", "python shell, waiting for connections from receivers. \"\"\" import cv2 import queue from", "import os # allow the example to be run without installing the package,", "this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This example is", "with Peer(timeout=1) as peer: while True: ret, frame = cam.read() if not ret:", "256 == 27: print(\"Escape hit, closing...\") break peer.broadcast(frame) #cv2.imshow(\"Webcam\", frame) cam.release() # cv2.destroyAllWindows()", "connections from receivers. \"\"\" import cv2 import queue from peerpy import Peer, protocol", "sender example This example is made to be run from one python shell,", "import queue from peerpy import Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as", "waiting for connections from receivers. \"\"\" import cv2 import queue from peerpy import", "be run from one python shell, waiting for connections from receivers. 
\"\"\" import", "while True: ret, frame = cam.read() if not ret: print(\"Failed grabbing camera frame\")", "root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This example is made to", "print(\"Failed grabbing camera frame\") break k = cv2.waitKey(1) if k % 256 ==", "from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV Stream sender example This example", "grabbing camera frame\") break k = cv2.waitKey(1) if k % 256 == 27:", "if k % 256 == 27: print(\"Escape hit, closing...\") break peer.broadcast(frame) #cv2.imshow(\"Webcam\", frame)", "Peer, protocol cam = cv2.VideoCapture(0) with Peer(timeout=1) as peer: while True: ret, frame", "% 256 == 27: print(\"Escape hit, closing...\") break peer.broadcast(frame) #cv2.imshow(\"Webcam\", frame) cam.release() #", "Peer(timeout=1) as peer: while True: ret, frame = cam.read() if not ret: print(\"Failed", "camera frame\") break k = cv2.waitKey(1) if k % 256 == 27: print(\"Escape", "to be run without installing the package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.')))", "run without installing the package, from this repository's root directory sys.path.append(os.path.abspath(os.path.join('.'))) \"\"\" OpenCV" ]
[ "app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if", "{} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name,", "Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will calculate the connections", "size is maximum_window_size, and the current event is in the last position of", "sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*')", "check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which thread used it will be stored-------'''", "1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE]", "str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE", "-1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as", "import traceback import sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE']", "a list of activities from the same case_id of current event(another event), size", "-1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') 
== -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*')", "if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*')", "except Exception as ec: raise ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node] =", "streaming_event_compliance import app from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from streaming_event_compliance.objects.automata import", "except Exception as ec: raise ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing:", "sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory =", "from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback import sys check_executing_order = {}", "check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which thread used it", "event(another event), size is maximum_window_size, and the current event is in the last", "= {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name", "the windowsMemory. 
:param windowsMemory: :`list` a list of activities from the same case_id", "ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\")", "will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing:", "run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring", "else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which thread used", "Before releasing lock, which thread used it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE])", "calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with", "sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)):", "sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec else: lock =", "+ 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%'", "-1: 
gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE',", "import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback import sys check_executing_order", "case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception:", "if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1]", "is in the last position of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). \"\"\"", "+ 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if", "streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback import sys", "ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before releasing lock, which thread", "check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which thread", "\"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will calculate", "0)) elif source_node.find('*') == -1: 
gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as", "last position of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). \"\"\" for ws in", "'''--------For Testing: Before releasing lock, which thread used it will be stored-------''' if", "same case_id of current event(another event), size is maximum_window_size, and the current event", "This function will calculate the connections with different size for the windowsMemory. :param", "Exception as ec: raise ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before", "ec: raise ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before releasing lock,", "ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will calculate the connections with different", "windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') ==", "windows_memory): global check_executing_order '''--------For Testing: Before releasing lock, which thread used it will", "traceback import sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def", "raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will calculate the connections with", "CL, T, C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging", "list of activities from the same case_id of current event(another event), 
size is", "source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1:", "releasing lock, which thread used it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else:", "def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id,", "MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] ==", "event == windowsMemory[maximum_window_size]). \"\"\" for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws:", "== windowsMemory[maximum_window_size]). \"\"\" for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE])", "windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error", "windowsMemory. 
:param windowsMemory: :`list` a list of activities from the same case_id of", "ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0:", "import app from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from streaming_event_compliance.objects.automata import automata", "gVars, CL, T, C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from", "app from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from streaming_event_compliance.objects.automata import automata from", "with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will calculate the", "- ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1])", "import ServerLogging import threading import traceback import sys check_executing_order = {} WINDOW_SIZE =", "= ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if", "-1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec else:", "gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec else: lock", "sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec def executing_order4test(case_id, windows_memory):", "calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will 
calculate the connections with different size for", "= [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which thread used it will", "ServerLogging import threading import traceback import sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE']", "== -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1 and sink_node.find('*') == -1:", "ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This", "elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1 and sink_node.find('*')", "of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). \"\"\" for ws in WINDOW_SIZE: source_node", "= app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try:", "\"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except", "== -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif", "windowsMemory: :`list` a list of activities from the same case_id of current event(another", "C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, 
windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id,", "Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description:", "gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node,", "lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\",", "of current event(another event), size is maximum_window_size, and the current event is in", "streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback import sys check_executing_order = {} WINDOW_SIZE", "def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before releasing lock, which thread used", "= C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id,", "is maximum_window_size, and the current event is in the last position of the", "app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\",", "1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if 
windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and", "lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released", "source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1))", "- ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire():", "MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node,", "-1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release()", "as ec: raise ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before releasing", "CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') ==", "sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1:", "MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire():", "import threading import traceback import sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE", "else: lock = threading.RLock() CL.lock_list[source_node, 
sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if", "sink_node)).release() except Exception as ec: raise ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node]", "streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import", "case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will", "the connections with different size for the windowsMemory. :param windowsMemory: :`list` a list", "T, C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import", ":`list` a list of activities from the same case_id of current event(another event),", "ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE -", "'~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1", "windowsMemory[maximum_window_size]). \"\"\" for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node", "case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc())", "windowsMemory (i.e. event == windowsMemory[maximum_window_size]). 
\"\"\" for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE", "used it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE])", "try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif", "raise ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire():", "maximum_window_size, and the current event is in the last position of the windowsMemory", "raise ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before releasing lock, which", "sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name", "from streaming_event_compliance import app from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from streaming_event_compliance.objects.automata", "1)) elif source_node.find('*') != -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node,", "for the windowsMemory. 
:param windowsMemory: :`list` a list of activities from the same", "CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%',", "ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release()", "which thread used it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] =", "CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec else: lock = threading.RLock() CL.lock_list[source_node,", "threading import traceback import sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE =", "\"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def", "will calculate the connections with different size for the windowsMemory. 
:param windowsMemory: :`list`", "streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading", "windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\")", "= lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') ==", "elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec:", "gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec def executing_order4test(case_id,", "ec: raise ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node,", "1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\",", "the last position of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). 
\"\"\" for ws", "\"\"\" for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node =", "executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id,", "the same case_id of current event(another event), size is maximum_window_size, and the current", "event is in the last position of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]).", "for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE", "0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1 and", "lock = threading.RLock() CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE]", "threading.RLock() CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%'", "func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\")", "1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec def executing_order4test(case_id, windows_memory): global", "check_executing_order '''--------For Testing: Before releasing lock, which thread used it will be stored-------'''", "Testing: Before releasing lock, which thread used it will be stored-------''' if check_executing_order.get(case_id):", "= ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: 
MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE", "connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory):", "ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node, sink_node)).acquire(): try:", "source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise", "thread used it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = []", "in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws", "-1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec def", "import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import", "if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node,", "from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import", "CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec def 
executing_order4test(case_id, windows_memory): global check_executing_order", "sink_node, 1)) elif source_node.find('*') != -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1))", "!= -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception", "case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\",", "','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE +", "<filename>streaming_event_compliance/services/build_automata/case_thread.py<gh_stars>1-10 from streaming_event_compliance import app from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from", "function will calculate the connections with different size for the windowsMemory. 
:param windowsMemory:", "MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory)", "\"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name,", "if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock,", "as ec: raise ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node] = lock if", "size for the windowsMemory. :param windowsMemory: :`list` a list of activities from the", "global check_executing_order '''--------For Testing: Before releasing lock, which thread used it will be", "ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\")", "automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback", "in the last position of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). 
\"\"\" for", "and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec:", "current event(another event), size is maximum_window_size, and the current event is in the", "executing_order4test(case_id, windows_memory): global check_executing_order '''--------For Testing: Before releasing lock, which thread used it", "[] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which thread used it will be", "ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback import sys check_executing_order =", "elif source_node.find('*') != -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release()", "CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and", "gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except", "be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before", "','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if CL.lock_list.get((source_node, sink_node)): if CL.lock_list.get((source_node,", "\"server\", case_id, \"Calculating connections\") except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with 
Caselock\") raise", "import sys check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id):", "= sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory", "sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0))", "Description: This function will calculate the connections with different size for the windowsMemory.", "def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function will calculate the connections with different size", "\"\"\" Description: This function will calculate the connections with different size for the", "try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE +", "== '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1:", "+ 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name,", "1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec else: lock = threading.RLock()", "Exception as ec: raise ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node] = lock", "from streaming_event_compliance.objects.exceptions.exception import ThreadException from 
streaming_event_compliance.objects.logging.server_logging import ServerLogging import threading import traceback import", "= app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread())) try: if C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name,", "\"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\" Description: This function", "stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing", "\"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name,", "== -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node,", "sink_node)).release() except Exception as ec: raise ec def executing_order4test(case_id, windows_memory): global check_executing_order '''--------For", "'~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node,", "of activities from the same case_id of current event(another event), size is maximum_window_size,", "== -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: 
raise ec", "lock, which thread used it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id]", "the current event is in the last position of the windowsMemory (i.e. event", "\"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id,", "source_node.find('*') != -1 and sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except", "= threading.RLock() CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] ==", "lock if CL.lock_list.get((source_node, sink_node)).acquire(): try: if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1:", "C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException from streaming_event_compliance.objects.logging.server_logging import ServerLogging", "ec else: lock = threading.RLock() CL.lock_list[source_node, sink_node] = lock if CL.lock_list.get((source_node, sink_node)).acquire(): try:", "different size for the windowsMemory. :param windowsMemory: :`list` a list of activities from", "import gVars, CL, T, C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception import ThreadException", "position of the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). \"\"\" for ws in WINDOW_SIZE:", "activities from the same case_id of current event(another event), size is maximum_window_size, and", "current event is in the last position of the windowsMemory (i.e. 
event ==", "calculate the connections with different size for the windowsMemory. :param windowsMemory: :`list` a", "source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') != -1 and sink_node.find('*') ==", "== -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise ec", "the windowsMemory (i.e. event == windowsMemory[maximum_window_size]). \"\"\" for ws in WINDOW_SIZE: source_node =", "event), size is maximum_window_size, and the current event is in the last position", "except Exception: ServerLogging().log_error(func_name, \"server\", case_id, \"Error with Caselock\") raise ThreadException(traceback.format_exc()) def calculate_connection_for_different_prefix_automata(windowsMemory): \"\"\"", "from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C from streaming_event_compliance.objects.automata import automata from streaming_event_compliance.objects.exceptions.exception", "(i.e. event == windowsMemory[maximum_window_size]). 
\"\"\" for ws in WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE -", "sink_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception as ec: raise", "and the current event is in the last position of the windowsMemory (i.e.", "case_id of current event(another event), size is maximum_window_size, and the current event is", "and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node,", "'~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) CL.lock_list.get((source_node, sink_node)).release() except Exception", "from the same case_id of current event(another event), size is maximum_window_size, and the", "C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory) calculate_connection_for_different_prefix_automata(windows_memory) ServerLogging().log_info(func_name, \"server\", case_id, \"Calculating", "it will be stored-------''' if check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For", "check_executing_order = {} WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name =", "C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0) C.lock_List.get(case_id).release() ServerLogging().log_info(func_name, \"server\", case_id, \"Released lock\") executing_order4test(case_id, windows_memory)", 
"check_executing_order.get(case_id): check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) else: check_executing_order[case_id] = [] check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE]) '''--------For Testing: Before releasing lock, which", "if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*')", ":param windowsMemory: :`list` a list of activities from the same case_id of current", "with different size for the windowsMemory. :param windowsMemory: :`list` a list of activities", "ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1]) if", "connections with different size for the windowsMemory. :param windowsMemory: :`list` a list of", "gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0)) elif source_node.find('*') == -1: gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1)) elif source_node.find('*') !=", "WINDOW_SIZE: source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE]) sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws +", "C.lock_List.get(case_id).acquire(): ServerLogging().log_info(func_name, \"server\", case_id, \"Acquiring lock\") windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1] C.dictionary_cases.get(case_id).pop(0)", "WINDOW_SIZE = app.config['WINDOW_SIZE'] MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE'] def run_build(case_id): func_name = sys._getframe().f_code.co_name ServerLogging().log_info(func_name, str(threading.current_thread()))" ]
[ "<reponame>zdenek-nemec/python-tutorial-socratica import functools numbers = list(range(1, 11)) print(numbers) product = functools.reduce(lambda x, y:", "functools numbers = list(range(1, 11)) print(numbers) product = functools.reduce(lambda x, y: x*y, numbers)", "import functools numbers = list(range(1, 11)) print(numbers) product = functools.reduce(lambda x, y: x*y,", "numbers = list(range(1, 11)) print(numbers) product = functools.reduce(lambda x, y: x*y, numbers) print(product)" ]
[ "def pdf(func): \"\"\" decorator to save all plots generated by func to pdf", "hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates on a healpy map zoom:", "range of zoom-in map rot: center of zoomed in map lcoord: label of", "not None: ax.set_xlim(*new_lims(ax.get_xlim(), xlim), auto=None) if ylim is not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None)", "from scipy import optimize from matplotlib.backends.backend_pdf import PdfPages from matplotlib import colors import", "# Make a 2d normed histogram H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # Find the norm of", "- target levels = [optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,)) for cl in cls] levels.append(H.max())", "matplotlib.backends.backend_pdf import PdfPages from matplotlib import colors import matplotlib.patheffects as path_effects import matplotlib.pyplot", "on a healpy map zoom: indicates zoomed-in cartview lonra: longitude range of zoomed-in", "confidence levels \"\"\" if bins is None: bins = int(np.sqrt(len(x))) # Make a", "lonlat coordinates for labels lons = np.arange(-150, 181, 30) lats = np.arange(-90, 91,", "and accounted for. 
\"\"\" lb, rt = curr_lim if lb <= rt: #", "return ret else: return func(*args, **kwargs) return pdfwrapper def new_lims(curr_lim, bounds): \"\"\"checks whether", "not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y, cls=(0.95, 0.68), bins=None): \"\"\"given 2D", "zoom: # location of other, fixed coordinate lon_offset = rot[0]+lonra[0] lat_offset = rot[1]+latra[0]", "coordinate lon_offset = rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat coordinates for labels lons", "indicates zoomed-in cartview lonra: longitude range of zoomed-in map latra: latitude range of", "in plt.get_fignums()] for fig in figs: fig.savefig(pp, format='pdf') pp.close() def colorlines(x, y, ncolors=5,", "import functools import numpy as np from scipy import optimize from matplotlib.backends.backend_pdf import", "count.sum() - target levels = [optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,)) for cl in cls]", "else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625), size=\"medium\") ax.annotate(r\"$\\bf{180^\\circ}$\", xy=(-1.95, 0.625), size=\"medium\") ax.annotate(coord, xy=(0.8, -0.05), size=\"medium\",", "rt = curr_lim if lb <= rt: # normal ordering combined = sorted(curr_lim+bounds)", "the norm of the sum # Take histogram bin membership as proportional to", "axis limit does not overlap with (xlim, ylim), the new limits are set", "np.arange(-150, 181, 30) lats = np.arange(-90, 91, 30) # actual text at those", "map zoom: indicates zoomed-in cartview lonra: longitude range of zoomed-in map latra: latitude", "figs=None, dpi=200): \"\"\" http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once \"\"\" pp = PdfPages(filename) if figs is None: figs", "with (xlim, ylim), the new limits are set to (xlim, ylim). 
Otherwise limits", "location of other, fixed coordinate lon_offset = rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat", "if ylim is not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y, cls=(0.95, 0.68),", "curr_lim if lb <= rt: # normal ordering combined = sorted(curr_lim+bounds) # no", "xlim and ylim if they exceed the bounds (xlim, ylim). If the axis", "return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates on a", "text at those coordinates if coord == COORD.det: llats = 90-lats else: llats", "if zoom: for _ in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\",", "coordinate labels ax = plt.gca() if zoom: # location of other, fixed coordinate", "through. *ncolors* specifies the number of different colors to use \"\"\" cmap =", "= sorted(curr_lim+bounds, reverse=True) # no overlap if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:])", "= [plt.figure(n) for n in plt.get_fignums()] for fig in figs: fig.savefig(pp, format='pdf') pp.close()", "based on bounds or the current limit. Reverse order (i.e. left > right)", "def restrict_axes(ax, xlim=None, ylim=None): \"\"\"Given a matplotlib axis *ax*, restricts the axis limits", "different colors to use \"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for", "xlim), auto=None) if ylim is not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y,", "(xlim, ylim), the new limits are set to (xlim, ylim). 
Otherwise limits are", "'Galactic') def multipage(filename, figs=None, dpi=200): \"\"\" http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once \"\"\" pp = PdfPages(filename) if figs", "plt.close('all') ret = func(*args, **kwargs) multipage(kwargs['pdffile']) return ret else: return func(*args, **kwargs) return", "= 0 # lonlat coordinates for labels lons = np.arange(-150, 181, 30) lats", "of other, fixed coordinate lon_offset = rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat coordinates", "labels ax = plt.gca() if zoom: # location of other, fixed coordinate lon_offset", "save all plots generated by func to pdf \"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs):", "y[low:high], color=cmap(norm(i)), **kwargs) def pdf(func): \"\"\" decorator to save all plots generated by", "or tuple(combined[2:]) == (lb, rt): return bounds[0], bounds[1] return combined[1], combined[2] def restrict_axes(ax,", "# normal ordering combined = sorted(curr_lim+bounds) # no overlap if tuple(combined[:2]) == (lb,", "Find the norm of the sum # Take histogram bin membership as proportional", "norm of the sum # Take histogram bin membership as proportional to Likelihood", "lon_offset = -180 lat_offset = 0 # lonlat coordinates for labels lons =", "limit exceeds the bounds and returns the appropriate new limits based on bounds", "coordinates if coord == COORD.det: llats = 90-lats else: llats = lats #", "fig.savefig(pp, format='pdf') pp.close() def colorlines(x, y, ncolors=5, cmapname='viridis_r', **kwargs): \"\"\"Plot a line plot", "= plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for i in range(ncolors): chunksize = len(x)//ncolors", "H[w] return count.sum() - target levels = [optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,)) for cl", "reverse=True) # no overlap if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) == (lb,", "H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # 
Find the norm of the sum # Take histogram bin", "levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates on a healpy", "cl in cls] levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\"", "w = np.where(H>limit) count = H[w] return count.sum() - target levels = [optimize.bisect(objective,", "1 to keep lines connected high = min((i+1)*chunksize+1, len(x)) plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs)", "def contour_levels(x, y, cls=(0.95, 0.68), bins=None): \"\"\"given 2D datapoints, return values of the", "limits to xlim and ylim if they exceed the bounds (xlim, ylim). If", "90-lats else: llats = lats # white outline around text pe = [path_effects.Stroke(linewidth=1.5,", "coordinates for labels lons = np.arange(-150, 181, 30) lats = np.arange(-90, 91, 30)", "return bounds[0], bounds[1] return combined[1], combined[2] def restrict_axes(ax, xlim=None, ylim=None): \"\"\"Given a matplotlib", "coordinates on a healpy map zoom: indicates zoomed-in cartview lonra: longitude range of", "process def objective(limit, target): w = np.where(H>limit) count = H[w] return count.sum() -", "figs: fig.savefig(pp, format='pdf') pp.close() def colorlines(x, y, ncolors=5, cmapname='viridis_r', **kwargs): \"\"\"Plot a line", "colors as the data is stepped through. *ncolors* specifies the number of different", "is stepped through. *ncolors* specifies the number of different colors to use \"\"\"", "tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) == (lb, rt): return bounds[0], bounds[1] return", "bounds[1] return combined[1], combined[2] def restrict_axes(ax, xlim=None, ylim=None): \"\"\"Given a matplotlib axis *ax*,", "data is stepped through. 
def colorlines(x, y, ncolors=5, cmapname='viridis_r', **kwargs):
    """Plot a line plot in which the lines change colors as the data is
    stepped through.

    *ncolors* specifies the number of different colors to use

    Any extra keyword arguments are forwarded to ``plt.plot``.
    """
    cmap = plt.get_cmap(cmapname)
    norm = colors.Normalize(vmin=0, vmax=ncolors-1)
    # chunk length is loop-invariant; hoist it out of the loop
    chunksize = len(x)//ncolors
    for i in range(ncolors):
        low = i*chunksize
        # add 1 to keep lines connected
        high = min((i+1)*chunksize+1, len(x))
        plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs)
def new_lims(curr_lim, bounds):
    """Clamp an axis range to *bounds*.

    Returns new (left, right) limits: the intersection of *curr_lim* and
    *bounds* when they overlap, otherwise *bounds* itself.  Reversed
    ranges (left > right) are supported and handled symmetrically.
    """
    lo, hi = curr_lim
    ascending = lo <= hi
    # merge all four endpoints in the same orientation as curr_lim
    merged = sorted(curr_lim + bounds, reverse=not ascending)
    # if curr_lim occupies either outer pair, the ranges do not overlap
    if (lo, hi) in (tuple(merged[:2]), tuple(merged[2:])):
        return bounds[0], bounds[1]
    # overlapping ranges: the intersection is the middle two endpoints
    return merged[1], merged[2]
path_effects import matplotlib.pyplot as plt from collections import namedtuple CoordSys", "\"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs): if 'pdffile' in kwargs and kwargs['pdffile'] is not", "= np.where(H>limit) count = H[w] return count.sum() - target levels = [optimize.bisect(objective, H.min(),", "func to pdf \"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs): if 'pdffile' in kwargs and", "cls] levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates", "number of different colors to use \"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0,", "auto=None) if ylim is not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y, cls=(0.95,", "91, 30) # actual text at those coordinates if coord == COORD.det: llats", "= func(*args, **kwargs) multipage(kwargs['pdffile']) return ret else: return func(*args, **kwargs) return pdfwrapper def", "# white outline around text pe = [path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()] for _ in", "return func(*args, **kwargs) return pdfwrapper def new_lims(curr_lim, bounds): \"\"\"checks whether the current limit", "matplotlib axis *ax*, restricts the axis limits to xlim and ylim if they", "around text pe = [path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()] for _ in zip(lats, llats): hp.projtext(lon_offset,", "is not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y, cls=(0.95, 0.68), bins=None): \"\"\"given", "keep lines connected high = min((i+1)*chunksize+1, len(x)) plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs) def pdf(func):", "decorator to save all plots generated by func to pdf \"\"\" @functools.wraps(func) def", "or the current limit. Reverse order (i.e. 
def pdf(func):
    """ decorator to save all plots generated by func to pdf

    The wrapped function is expected to take a ``pdffile`` keyword; when
    it is present and not None, all open figures are closed beforehand and
    every figure produced by the call is written to that file.
    """
    @functools.wraps(func)
    def pdfwrapper(*args, **kwargs):
        target = kwargs.get('pdffile')
        if target is None:
            # no output file requested: plain passthrough
            return func(*args, **kwargs)
        plt.close('all')
        result = func(*args, **kwargs)
        multipage(target)
        return result

    return pdfwrapper
def restrict_axes(ax, xlim=None, ylim=None):
    """Clamp the limits of the matplotlib axis *ax* to *xlim* and *ylim*.

    Each limit that exceeds its bounds is tightened; a limit with no
    overlap at all is replaced by the bounds; limits already inside the
    bounds are left untouched.

    *xlim* and *ylim* are the restricted ranges, passed as tuples.
    """
    if xlim is not None:
        clamped = new_lims(ax.get_xlim(), xlim)
        ax.set_xlim(*clamped, auto=None)
    if ylim is not None:
        clamped = new_lims(ax.get_ylim(), ylim)
        ax.set_ylim(*clamped, auto=None)
from collections import", "else: llats = lats # white outline around text pe = [path_effects.Stroke(linewidth=1.5, foreground='white'),", "# lonlat coordinates for labels lons = np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0], 2) lats = np.arange(np.round(lat_offset),", "new limits based on bounds or the current limit. Reverse order (i.e. left", "def new_lims(curr_lim, bounds): \"\"\"checks whether the current limit exceeds the bounds and returns", "to (xlim, ylim). Otherwise limits are kept as is. *xlim* and *ylim* are", "lon_offset = rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat coordinates for labels lons =", "= CoordSys('Detector', 'Equatorial', 'Galactic') def multipage(filename, figs=None, dpi=200): \"\"\" http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once \"\"\" pp =", "= namedtuple('CoordSys', 'det eq gal') COORD = CoordSys('Detector', 'Equatorial', 'Galactic') def multipage(filename, figs=None,", "matplotlib import colors import matplotlib.patheffects as path_effects import matplotlib.pyplot as plt from collections", "# no overlap if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) == (lb, rt):", "*xlim* and *ylim* are the restricted ranges and should be passed as tuples", "change colors as the data is stepped through. *ncolors* specifies the number of", "bounds): \"\"\"checks whether the current limit exceeds the bounds and returns the appropriate", "sum # Take histogram bin membership as proportional to Likelihood # This is", "plots generated by func to pdf \"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs): if 'pdffile'", "limits are kept as is. 
def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None):
    """ labels coordinates on a healpy map
    coord: label of coordinate system (compared against COORD fields)
    zoom: indicates zoomed-in cartview
    lonra: longitude range of zoomed-in map
    latra: latitude range of zoom-in map
    rot: center of zoomed in map
    """
    import healpy as hp
    # coordinate labels
    ax = plt.gca()
    if zoom:
        # location of other, fixed coordinate
        lon_offset = rot[0]+lonra[0]
        lat_offset = rot[1]+latra[0]
        # lonlat coordinates for labels
        lons = np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0], 2)
        lats = np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2)
    else:
        lon_offset = -180
        lat_offset = 0

        # lonlat coordinates for labels
        lons = np.arange(-150, 181, 30)
        lats = np.arange(-90, 91, 30)

    # actual text at those coordinates
    if coord == COORD.det:
        # detector coordinates label zenith angle rather than latitude
        llats = 90-lats
    else:
        llats = lats

    # white outline around text
    pe = [path_effects.Stroke(linewidth=1.5, foreground='white'),
          path_effects.Normal()]
    # raw strings: "\c" in "\circ" is an invalid escape in a plain literal
    # (DeprecationWarning now, SyntaxError in future Pythons); the runtime
    # value is unchanged
    for _ in zip(lats, llats):
        hp.projtext(lon_offset, _[0], r"{:.0f}$^\circ$".format(_[1]),
                    lonlat=True, path_effects=pe)
    if zoom:
        for _ in lons:
            hp.projtext(_, lat_offset, r"{:.0f}$^\circ$".format(_),
                        lonlat=True, path_effects=pe)
    else:
        ax.annotate(r"$\bf{-180^\circ}$", xy=(1.7, 0.625), size="medium")
        ax.annotate(r"$\bf{180^\circ}$", xy=(-1.95, 0.625), size="medium")
    ax.annotate(coord, xy=(0.8, -0.05), size="medium", xycoords="axes fraction")
def multipage(filename, figs=None, dpi=200):
    """Save figures into a single multi-page pdf at *filename*.

    http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once

    :param filename: output pdf path
    :param figs: figures to save; defaults to every currently open figure
    :param dpi: resolution passed to ``savefig`` for rasterized artists
    """
    pp = PdfPages(filename)
    try:
        if figs is None:
            figs = [plt.figure(n) for n in plt.get_fignums()]
        for fig in figs:
            # forward dpi: previously this parameter was silently ignored
            fig.savefig(pp, format='pdf', dpi=dpi)
    finally:
        # close even if savefig raises, so the pdf handle is not leaked
        pp.close()
If the axis limit does not", "return pdfwrapper def new_lims(curr_lim, bounds): \"\"\"checks whether the current limit exceeds the bounds", "no overlap if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) == (lb, rt): return", "longitude range of zoomed-in map latra: latitude range of zoom-in map rot: center", "2) lats = np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2) else: lon_offset = -180 lat_offset = 0", "[optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,)) for cl in cls] levels.append(H.max()) return levels def hp_ticklabels(coord,", "histogram H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # Find the norm of the sum # Take histogram", "fig in figs: fig.savefig(pp, format='pdf') pp.close() def colorlines(x, y, ncolors=5, cmapname='viridis_r', **kwargs): \"\"\"Plot", "scipy import optimize from matplotlib.backends.backend_pdf import PdfPages from matplotlib import colors import matplotlib.patheffects", "tuple(combined[2:]) == (lb, rt): return bounds[0], bounds[1] return combined[1], combined[2] def restrict_axes(ax, xlim=None,", "lats = np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2) else: lon_offset = -180 lat_offset = 0 #", "bounds[0], bounds[1] return combined[1], combined[2] def restrict_axes(ax, xlim=None, ylim=None): \"\"\"Given a matplotlib axis", "kept as is. 
*xlim* and *ylim* are the restricted ranges and should be", "func(*args, **kwargs) return pdfwrapper def new_lims(curr_lim, bounds): \"\"\"checks whether the current limit exceeds", "_[0], \"{:.0f}$^\\circ$\".format(_[1]), lonlat=True, path_effects=pe) if zoom: for _ in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_),", "use \"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for i in range(ncolors):", "pdfwrapper(*args, **kwargs): if 'pdffile' in kwargs and kwargs['pdffile'] is not None: plt.close('all') ret", "axis limits to xlim and ylim if they exceed the bounds (xlim, ylim).", "bounds or the current limit. Reverse order (i.e. left > right) is allowed", "zoom-in map rot: center of zoomed in map lcoord: label of coordinate system", "\"\"\" lb, rt = curr_lim if lb <= rt: # normal ordering combined", "membership as proportional to Likelihood # This is true when data comes from", "cmapname='viridis_r', **kwargs): \"\"\"Plot a line plot in which the lines change colors as", "H.min(), H.max(), args=(cl*norm,)) for cl in cls] levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False,", "zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates on a healpy map zoom: indicates", "limits based on bounds or the current limit. Reverse order (i.e. 
left >", "*ylim* are the restricted ranges and should be passed as tuples \"\"\" if", "other, fixed coordinate lon_offset = rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat coordinates for", "to pdf \"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs): if 'pdffile' in kwargs and kwargs['pdffile']", "== COORD.det: llats = 90-lats else: llats = lats # white outline around", "optimize from matplotlib.backends.backend_pdf import PdfPages from matplotlib import colors import matplotlib.patheffects as path_effects", "line plot in which the lines change colors as the data is stepped", "Take histogram bin membership as proportional to Likelihood # This is true when", "format='pdf') pp.close() def colorlines(x, y, ncolors=5, cmapname='viridis_r', **kwargs): \"\"\"Plot a line plot in", "add 1 to keep lines connected high = min((i+1)*chunksize+1, len(x)) plt.plot(x[low:high], y[low:high], color=cmap(norm(i)),", "limit. Reverse order (i.e. left > right) is allowed and accounted for. \"\"\"", "specifies the number of different colors to use \"\"\" cmap = plt.get_cmap(cmapname) norm", "bins = int(np.sqrt(len(x))) # Make a 2d normed histogram H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # Find", "target): w = np.where(H>limit) count = H[w] return count.sum() - target levels =", "matplotlib.pyplot as plt from collections import namedtuple CoordSys = namedtuple('CoordSys', 'det eq gal')", "ylim=None): \"\"\"Given a matplotlib axis *ax*, restricts the axis limits to xlim and", "high = min((i+1)*chunksize+1, len(x)) plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs) def pdf(func): \"\"\" decorator to", "left > right) is allowed and accounted for. 
\"\"\" lb, rt = curr_lim", "a 2d normed histogram H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # Find the norm of the sum", "labels lons = np.arange(-150, 181, 30) lats = np.arange(-90, 91, 30) # actual", "np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2) else: lon_offset = -180 lat_offset = 0 # lonlat coordinates", "rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat coordinates for labels lons = np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0],", "= lats # white outline around text pe = [path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()] for", "= sorted(curr_lim+bounds) # no overlap if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) ==", "np from scipy import optimize from matplotlib.backends.backend_pdf import PdfPages from matplotlib import colors", "rt: # normal ordering combined = sorted(curr_lim+bounds) # no overlap if tuple(combined[:2]) ==", "passed as tuples \"\"\" if xlim is not None: ax.set_xlim(*new_lims(ax.get_xlim(), xlim), auto=None) if", "sorted(curr_lim+bounds, reverse=True) # no overlap if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) ==", "passed confidence levels \"\"\" if bins is None: bins = int(np.sqrt(len(x))) # Make", "len(x)//ncolors low = i*chunksize # add 1 to keep lines connected high =", "If the axis limit does not overlap with (xlim, ylim), the new limits", "2d normed histogram H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # Find the norm of the sum #", "lcoord: label of coordinate system \"\"\" import healpy as hp # coordinate labels", "in range(ncolors): chunksize = len(x)//ncolors low = i*chunksize # add 1 to keep", "lines change colors as the data is stepped through. 
*ncolors* specifies the number", "foreground='white'), path_effects.Normal()] for _ in zip(lats, llats): hp.projtext(lon_offset, _[0], \"{:.0f}$^\\circ$\".format(_[1]), lonlat=True, path_effects=pe) if", "range(ncolors): chunksize = len(x)//ncolors low = i*chunksize # add 1 to keep lines", "restricted ranges and should be passed as tuples \"\"\" if xlim is not", "**kwargs) def pdf(func): \"\"\" decorator to save all plots generated by func to", "\"\"\"checks whether the current limit exceeds the bounds and returns the appropriate new", "lonra: longitude range of zoomed-in map latra: latitude range of zoom-in map rot:", "set to (xlim, ylim). Otherwise limits are kept as is. *xlim* and *ylim*", "range of zoomed-in map latra: latitude range of zoom-in map rot: center of", "at those coordinates if coord == COORD.det: llats = 90-lats else: llats =", "2D datapoints, return values of the pdf corresponding to the passed confidence levels", "for. \"\"\" lb, rt = curr_lim if lb <= rt: # normal ordering", "not None: plt.close('all') ret = func(*args, **kwargs) multipage(kwargs['pdffile']) return ret else: return func(*args,", "to use \"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for i in", "comes from a Markovian process def objective(limit, target): w = np.where(H>limit) count =", "the axis limit does not overlap with (xlim, ylim), the new limits are", "\"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for i in range(ncolors): chunksize", "min((i+1)*chunksize+1, len(x)) plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs) def pdf(func): \"\"\" decorator to save all", "def objective(limit, target): w = np.where(H>limit) count = H[w] return count.sum() - target", "the lines change colors as the data is stepped through. 
*ncolors* specifies the", "the axis limits to xlim and ylim if they exceed the bounds (xlim,", "from a Markovian process def objective(limit, target): w = np.where(H>limit) count = H[w]", "= rot[0]+lonra[0] lat_offset = rot[1]+latra[0] # lonlat coordinates for labels lons = np.arange(np.round(lon_offset),", "in kwargs and kwargs['pdffile'] is not None: plt.close('all') ret = func(*args, **kwargs) multipage(kwargs['pdffile'])", "rot=None): \"\"\" labels coordinates on a healpy map zoom: indicates zoomed-in cartview lonra:", "they exceed the bounds (xlim, ylim). If the axis limit does not overlap", "colors to use \"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for i", "llats = lats # white outline around text pe = [path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()]", "appropriate new limits based on bounds or the current limit. Reverse order (i.e.", "(xlim, ylim). Otherwise limits are kept as is. *xlim* and *ylim* are the", "ordering combined = sorted(curr_lim+bounds, reverse=True) # no overlap if tuple(combined[:2]) == (lb, rt)", "as proportional to Likelihood # This is true when data comes from a", "= [path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()] for _ in zip(lats, llats): hp.projtext(lon_offset, _[0], \"{:.0f}$^\\circ$\".format(_[1]), lonlat=True,", "**kwargs) multipage(kwargs['pdffile']) return ret else: return func(*args, **kwargs) return pdfwrapper def new_lims(curr_lim, bounds):", "namedtuple CoordSys = namedtuple('CoordSys', 'det eq gal') COORD = CoordSys('Detector', 'Equatorial', 'Galactic') def", "of coordinate system \"\"\" import healpy as hp # coordinate labels ax =", "path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625), size=\"medium\") ax.annotate(r\"$\\bf{180^\\circ}$\", xy=(-1.95, 0.625), size=\"medium\") ax.annotate(coord, xy=(0.8, -0.05),", "be passed as tuples \"\"\" if xlim is not None: ax.set_xlim(*new_lims(ax.get_xlim(), 
xlim), auto=None)", "generated by func to pdf \"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs): if 'pdffile' in", "None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y, cls=(0.95, 0.68), bins=None): \"\"\"given 2D datapoints,", "for _ in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625),", "[path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()] for _ in zip(lats, llats): hp.projtext(lon_offset, _[0], \"{:.0f}$^\\circ$\".format(_[1]), lonlat=True, path_effects=pe)", "and returns the appropriate new limits based on bounds or the current limit.", "rt) or tuple(combined[2:]) == (lb, rt): return bounds[0], bounds[1] return combined[1], combined[2] else:", "None: plt.close('all') ret = func(*args, **kwargs) multipage(kwargs['pdffile']) return ret else: return func(*args, **kwargs)", "as plt from collections import namedtuple CoordSys = namedtuple('CoordSys', 'det eq gal') COORD", "is not None: ax.set_xlim(*new_lims(ax.get_xlim(), xlim), auto=None) if ylim is not None: ax.set_ylim(*new_lims(ax.get_ylim(), ylim),", "pdf corresponding to the passed confidence levels \"\"\" if bins is None: bins", "to Likelihood # This is true when data comes from a Markovian process", "0 # lonlat coordinates for labels lons = np.arange(-150, 181, 30) lats =", "lb <= rt: # normal ordering combined = sorted(curr_lim+bounds) # no overlap if", "for cl in cls] levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None):", "the current limit. Reverse order (i.e. 
left > right) is allowed and accounted", "hp.projtext(lon_offset, _[0], \"{:.0f}$^\\circ$\".format(_[1]), lonlat=True, path_effects=pe) if zoom: for _ in lons: hp.projtext(_, lat_offset,", "CoordSys('Detector', 'Equatorial', 'Galactic') def multipage(filename, figs=None, dpi=200): \"\"\" http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once \"\"\" pp = PdfPages(filename)", "Make a 2d normed histogram H,xedges,yedges=np.histogram2d(x,y,bins=bins,normed=True) norm=H.sum() # Find the norm of the", "<= rt: # normal ordering combined = sorted(curr_lim+bounds) # no overlap if tuple(combined[:2])", "is None: figs = [plt.figure(n) for n in plt.get_fignums()] for fig in figs:", "current limit exceeds the bounds and returns the appropriate new limits based on", "the number of different colors to use \"\"\" cmap = plt.get_cmap(cmapname) norm =", "and kwargs['pdffile'] is not None: plt.close('all') ret = func(*args, **kwargs) multipage(kwargs['pdffile']) return ret", "rt) or tuple(combined[2:]) == (lb, rt): return bounds[0], bounds[1] return combined[1], combined[2] def", "Likelihood # This is true when data comes from a Markovian process def", "the new limits are set to (xlim, ylim). Otherwise limits are kept as", "are kept as is. *xlim* and *ylim* are the restricted ranges and should", "the data is stepped through. *ncolors* specifies the number of different colors to", "\"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625), size=\"medium\") ax.annotate(r\"$\\bf{180^\\circ}$\", xy=(-1.95, 0.625), size=\"medium\") ax.annotate(coord,", "in cls] levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels", "allowed and accounted for. 
\"\"\" lb, rt = curr_lim if lb <= rt:", "plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1) for i in range(ncolors): chunksize = len(x)//ncolors low", "map rot: center of zoomed in map lcoord: label of coordinate system \"\"\"", "[plt.figure(n) for n in plt.get_fignums()] for fig in figs: fig.savefig(pp, format='pdf') pp.close() def", "of different colors to use \"\"\" cmap = plt.get_cmap(cmapname) norm = colors.Normalize(vmin=0, vmax=ncolors-1)", "ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None) def contour_levels(x, y, cls=(0.95, 0.68), bins=None): \"\"\"given 2D datapoints, return", "= np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0], 2) lats = np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2) else: lon_offset = -180", "zoomed-in cartview lonra: longitude range of zoomed-in map latra: latitude range of zoom-in", "import healpy as hp # coordinate labels ax = plt.gca() if zoom: #", "levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates on", "def pdfwrapper(*args, **kwargs): if 'pdffile' in kwargs and kwargs['pdffile'] is not None: plt.close('all')", "is. *xlim* and *ylim* are the restricted ranges and should be passed as", "\"\"\" if xlim is not None: ax.set_xlim(*new_lims(ax.get_xlim(), xlim), auto=None) if ylim is not", "# reverse ordering combined = sorted(curr_lim+bounds, reverse=True) # no overlap if tuple(combined[:2]) ==", "overlap with (xlim, ylim), the new limits are set to (xlim, ylim). Otherwise", "bounds (xlim, ylim). 
If the axis limit does not overlap with (xlim, ylim),", "ncolors=5, cmapname='viridis_r', **kwargs): \"\"\"Plot a line plot in which the lines change colors", "colors.Normalize(vmin=0, vmax=ncolors-1) for i in range(ncolors): chunksize = len(x)//ncolors low = i*chunksize #", "None: figs = [plt.figure(n) for n in plt.get_fignums()] for fig in figs: fig.savefig(pp,", "# Take histogram bin membership as proportional to Likelihood # This is true", "does not overlap with (xlim, ylim), the new limits are set to (xlim,", "center of zoomed in map lcoord: label of coordinate system \"\"\" import healpy", "= H[w] return count.sum() - target levels = [optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,)) for", "latra=None, rot=None): \"\"\" labels coordinates on a healpy map zoom: indicates zoomed-in cartview", "@functools.wraps(func) def pdfwrapper(*args, **kwargs): if 'pdffile' in kwargs and kwargs['pdffile'] is not None:", "matplotlib.patheffects as path_effects import matplotlib.pyplot as plt from collections import namedtuple CoordSys =", "the current limit exceeds the bounds and returns the appropriate new limits based", "= min((i+1)*chunksize+1, len(x)) plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs) def pdf(func): \"\"\" decorator to save", "http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once \"\"\" pp = PdfPages(filename) if figs is None: figs = [plt.figure(n) for", "lat_offset = rot[1]+latra[0] # lonlat coordinates for labels lons = np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0], 2)", "as the data is stepped through. *ncolors* specifies the number of different colors", "(lb, rt): return bounds[0], bounds[1] return combined[1], combined[2] else: # reverse ordering combined", "order (i.e. left > right) is allowed and accounted for. \"\"\" lb, rt", "limits are set to (xlim, ylim). Otherwise limits are kept as is. 
*xlim*", "in which the lines change colors as the data is stepped through. *ncolors*", "# This is true when data comes from a Markovian process def objective(limit,", "== (lb, rt) or tuple(combined[2:]) == (lb, rt): return bounds[0], bounds[1] return combined[1],", "(lb, rt): return bounds[0], bounds[1] return combined[1], combined[2] def restrict_axes(ax, xlim=None, ylim=None): \"\"\"Given", "latra: latitude range of zoom-in map rot: center of zoomed in map lcoord:", "all plots generated by func to pdf \"\"\" @functools.wraps(func) def pdfwrapper(*args, **kwargs): if", "> right) is allowed and accounted for. \"\"\" lb, rt = curr_lim if", "as np from scipy import optimize from matplotlib.backends.backend_pdf import PdfPages from matplotlib import", "in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625), size=\"medium\") ax.annotate(r\"$\\bf{180^\\circ}$\",", "'det eq gal') COORD = CoordSys('Detector', 'Equatorial', 'Galactic') def multipage(filename, figs=None, dpi=200): \"\"\"", "colors import matplotlib.patheffects as path_effects import matplotlib.pyplot as plt from collections import namedtuple", "return count.sum() - target levels = [optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,)) for cl in", "def multipage(filename, figs=None, dpi=200): \"\"\" http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once \"\"\" pp = PdfPages(filename) if figs is", "lats # white outline around text pe = [path_effects.Stroke(linewidth=1.5, foreground='white'), path_effects.Normal()] for _", "a healpy map zoom: indicates zoomed-in cartview lonra: longitude range of zoomed-in map", "args=(cl*norm,)) for cl in cls] levels.append(H.max()) return levels def hp_ticklabels(coord, zoom=False, lonra=None, latra=None,", "def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None): \"\"\" labels coordinates on a healpy map", 
"labels lons = np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0], 2) lats = np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2) else: lon_offset", "= PdfPages(filename) if figs is None: figs = [plt.figure(n) for n in plt.get_fignums()]", "and should be passed as tuples \"\"\" if xlim is not None: ax.set_xlim(*new_lims(ax.get_xlim(),", "\"{:.0f}$^\\circ$\".format(_[1]), lonlat=True, path_effects=pe) if zoom: for _ in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True,", "the restricted ranges and should be passed as tuples \"\"\" if xlim is", "and ylim if they exceed the bounds (xlim, ylim). If the axis limit", "rt): return bounds[0], bounds[1] return combined[1], combined[2] else: # reverse ordering combined =", "the sum # Take histogram bin membership as proportional to Likelihood # This", "for n in plt.get_fignums()] for fig in figs: fig.savefig(pp, format='pdf') pp.close() def colorlines(x,", "lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625), size=\"medium\") ax.annotate(r\"$\\bf{180^\\circ}$\", xy=(-1.95,", "zip(lats, llats): hp.projtext(lon_offset, _[0], \"{:.0f}$^\\circ$\".format(_[1]), lonlat=True, path_effects=pe) if zoom: for _ in lons:", "for i in range(ncolors): chunksize = len(x)//ncolors low = i*chunksize # add 1", "_ in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7, 0.625), size=\"medium\")", "Markovian process def objective(limit, target): w = np.where(H>limit) count = H[w] return count.sum()", "vmax=ncolors-1) for i in range(ncolors): chunksize = len(x)//ncolors low = i*chunksize # add", "zoom: for _ in lons: hp.projtext(_, lat_offset, \"{:.0f}$^\\circ$\".format(_), lonlat=True, path_effects=pe) else: ax.annotate(r\"$\\bf{-180^\\circ}$\", xy=(1.7,", "hp # coordinate labels ax = plt.gca() if zoom: # 
location of other,", "np.arange(np.round(lon_offset), lon_offset+lonra[1]-lonra[0], 2) lats = np.arange(np.round(lat_offset), lat_offset+latra[1]-latra[0], 2) else: lon_offset = -180 lat_offset" ]
[ "__name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"])", "retval: return Successful(\"Successful operation\",'Loaded state') for element in urls: getattr(sys.modules[__name__], __name__).add_url_rule(element[0], view_func =", "(\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self, message, info=''): super(Successful, self).__init__()", "] class Successful(Response): def __init__(self, message, info=''): super(Successful, self).__init__() self.status = '200 '+message", "= 200 self.headers = {'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def", "def post(self): print \"Load state operation\" retval = load_state() if retval: return Successful(\"Successful", "= load_state() if retval: return Successful(\"Successful operation\",'Loaded state') for element in urls: getattr(sys.modules[__name__],", "self.status_code = 200 self.headers = {'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView):", "def post(self): print \"Save state operation\" retval = save_state() if retval: return Successful(\"Successful", "info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save state operation\" retval = save_state()", "load_state() if retval: return Successful(\"Successful operation\",'Loaded state') for element in urls: getattr(sys.modules[__name__], __name__).add_url_rule(element[0],", "retval = save_state() if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def", "= '200 '+message self.status_code = 200 self.headers = {'Content-Type': 'application/json'} self.data = info", "import sys from flask import Response, Blueprint from flask.views import MethodView from backend.backend", 
"\"Load state operation\" retval = load_state() if retval: return Successful(\"Successful operation\",'Loaded state') for", "= [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def", "'+message self.status_code = 200 self.headers = {'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class", "BackendLoadState(MethodView): def post(self): print \"Load state operation\" retval = load_state() if retval: return", "if retval: return Successful(\"Successful operation\",'Loaded state') for element in urls: getattr(sys.modules[__name__], __name__).add_url_rule(element[0], view_func", "class BackendLoadState(MethodView): def post(self): print \"Load state operation\" retval = load_state() if retval:", "Successful(\"Successful operation\",'Loaded state') for element in urls: getattr(sys.modules[__name__], __name__).add_url_rule(element[0], view_func = globals()[element[1]].as_view(''+element[1]+'_api'), methods=element[2])", "#/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save state operation\" retval = save_state() if", "flask.views import MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls", "import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]),", "from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" ,", "self.status = '200 '+message self.status_code = 200 self.headers = {'Content-Type': 'application/json'} self.data =", "backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\",", "class 
Successful(Response): def __init__(self, message, info=''): super(Successful, self).__init__() self.status = '200 '+message self.status_code", "state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load state operation\" retval = load_state()", "\"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self, message, info=''): super(Successful, self).__init__() self.status =", "import Response, Blueprint from flask.views import MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__],", "Response, Blueprint from flask.views import MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__,", "operation\" retval = load_state() if retval: return Successful(\"Successful operation\",'Loaded state') for element in", "[\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self, message, info=''): super(Successful,", "load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" ,", "state operation\" retval = save_state() if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class", "print \"Save state operation\" retval = save_state() if retval: return Successful(\"Successful operation\",'Saved state')", "\"Save state operation\" retval = save_state() if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/", "from flask import Response, Blueprint from flask.views import MethodView from backend.backend import save_state,", "#/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load state operation\" retval = load_state() if", "self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save state operation\" retval", ", 
\"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self, message,", "post(self): print \"Save state operation\" retval = save_state() if retval: return Successful(\"Successful operation\",'Saved", "__init__(self, message, info=''): super(Successful, self).__init__() self.status = '200 '+message self.status_code = 200 self.headers", "retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load state", "message, info=''): super(Successful, self).__init__() self.status = '200 '+message self.status_code = 200 self.headers =", "BackendSaveState(MethodView): def post(self): print \"Save state operation\" retval = save_state() if retval: return", "[\"POST\"]) ] class Successful(Response): def __init__(self, message, info=''): super(Successful, self).__init__() self.status = '200", "urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response):", "super(Successful, self).__init__() self.status = '200 '+message self.status_code = 200 self.headers = {'Content-Type': 'application/json'}", "Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load state operation\" retval", "retval = load_state() if retval: return Successful(\"Successful operation\",'Loaded state') for element in urls:", "operation\" retval = save_state() if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView):", "save_state() if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print", ", \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self, message, info=''): 
super(Successful, self).__init__() self.status", "= {'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save", "if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load", "self.headers = {'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print", "class BackendSaveState(MethodView): def post(self): print \"Save state operation\" retval = save_state() if retval:", "\"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self, message, info=''):", "post(self): print \"Load state operation\" retval = load_state() if retval: return Successful(\"Successful operation\",'Loaded", "200 self.headers = {'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def post(self):", "state operation\" retval = load_state() if retval: return Successful(\"Successful operation\",'Loaded state') for element", "Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ]", "import MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls =", "'200 '+message self.status_code = 200 self.headers = {'Content-Type': 'application/json'} self.data = info #/backend/save_state/", "return Successful(\"Successful operation\",'Loaded state') for element in urls: getattr(sys.modules[__name__], __name__).add_url_rule(element[0], view_func = globals()[element[1]].as_view(''+element[1]+'_api'),", "operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load state operation\" 
retval =", "{'Content-Type': 'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save state", "return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self): print \"Load state operation\"", "MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\"", "def __init__(self, message, info=''): super(Successful, self).__init__() self.status = '200 '+message self.status_code = 200", "Blueprint from flask.views import MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__,", "flask import Response, Blueprint from flask.views import MethodView from backend.backend import save_state, load_state", "sys from flask import Response, Blueprint from flask.views import MethodView from backend.backend import", "__name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class", "[(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\", [\"POST\"]) ] class Successful(Response): def __init__(self,", "= save_state() if retval: return Successful(\"Successful operation\",'Saved state') #/backend/load_state/ class BackendLoadState(MethodView): def post(self):", "save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\"", "self).__init__() self.status = '200 '+message self.status_code = 200 self.headers = {'Content-Type': 'application/json'} self.data", "'application/json'} self.data = info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save state operation\"", 
"setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__)) urls = [(\"/backend/save_state/\" , \"BackendSaveState\", [\"POST\"]), (\"/backend/load_state/\" , \"BackendLoadState\",", "Successful(Response): def __init__(self, message, info=''): super(Successful, self).__init__() self.status = '200 '+message self.status_code =", "from flask.views import MethodView from backend.backend import save_state, load_state setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__))", "print \"Load state operation\" retval = load_state() if retval: return Successful(\"Successful operation\",'Loaded state')", "info=''): super(Successful, self).__init__() self.status = '200 '+message self.status_code = 200 self.headers = {'Content-Type':", "= info #/backend/save_state/ class BackendSaveState(MethodView): def post(self): print \"Save state operation\" retval =" ]
[ "SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result", "@api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool = [ Person(person['person_name'], person['person_email'])", "try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200 except MailError: return jsonify({'message':", "in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager()", "user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result,", "return jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email',", "req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200 except MailError: return jsonify({'message': 'Could not", "import limiter api = Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query(", "user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as", "in query: with SessionContextManager() as session: session.delete(row) return jsonify({'message': 'Result deleted!'}), 200 return", "SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import 
generate_random_pairs, Person from", "result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query: with SessionContextManager() as", "return jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result", "= [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required", "@api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query)", "from website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs,", "SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'], req['body'])", "pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g)", "import token_required from website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter api =", "user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count =", "created!'}), 200 return abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user):", "in req ] if len(user_random_people_pool) > 1: user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id)", 
"request.get_json() pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session:", "RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes import", "query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs',", "result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query =", "except MailError: return jsonify({'message': 'Could not send mails!'}), 500 return jsonify({'message': 'There are", "sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)]", "'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result = request.get_json()", "from website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers", "row in query: with SessionContextManager() as session: session.delete(row) return jsonify({'message': 'Result deleted!'}), 200", "pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session: session.delete(query)", "deleted!'}), 200 return jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def", "return jsonify({'message': 'Could not send mails!'}), 500 return jsonify({'message': 'There are no results'}),", "import itertools from flask import jsonify, abort, 
Blueprint, request from website.database.DB import SessionFactory,", "flask import jsonify, abort, Blueprint, request from website.database.DB import SessionFactory, SessionContextManager from website.database.models", "> 1: user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count)", "= pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session: session.delete(query) return", "schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool", "pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result = request.get_json() draw_id = result['draw_count']", "jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs", "query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200 except MailError: return", "k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query", "website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import", "sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in user_results:", "person['person_email']) for person in req ] if len(user_random_people_pool) > 1: 
user_results = generate_random_pairs(user_random_people_pool)", "200 @api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs =", "sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in user_results: user_random_pairs", "as session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message': 'There is no", "schema = ResultSchema(many=True) recipients = schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return", "404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all()", "import operator import itertools from flask import jsonify, abort, Blueprint, request from website.database.DB", "is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json()", "= SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST'])", "= request.get_json() pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as", "jsonify, abort, Blueprint, request from website.database.DB import SessionFactory, SessionContextManager from website.database.models import UsersPerson,", "'Your pairs were created!'}), 200 return abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE'])", 
"__name__) @api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter(", "len(user_random_people_pool) > 1: user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM:", "import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs, MailError from", "sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result),", "generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending", "is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in user_results: user_random_pairs =", "SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query: with SessionContextManager() as session: session.delete(row) return", "def post_generate_pairs(user): req = request.get_json() user_random_people_pool = [ Person(person['person_name'], person['person_email']) for person in", "item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k, g", "first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager():", "do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): 
pair = request.get_json() pair_id = pair['pair']", "[first_person, second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id )", "import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema,", "g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query =", "draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if", "user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM:", "import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from", "with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person,", "for row in query: with SessionContextManager() as session: session.delete(row) return jsonify({'message': 'Result deleted!'}),", "methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema =", "description='You cannot do this') @api.route('/delete-pair', 
methods=['DELETE']) @token_required def delete_pair(user): pair = request.get_json() pair_id", "query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query) if query: try: send_mail_to_pairs(recipients,", "pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session: session.delete(query) return jsonify({'message':", "404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result = request.get_json() draw_id = result['draw_count'] query", "were created!'}), 200 return abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def", "from flask import jsonify, abort, Blueprint, request from website.database.DB import SessionFactory, SessionContextManager from", "DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result", "grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs')", "= SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'],", "== user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result =", "for [first_person, second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id", "token_required from website.utils.email_sending import send_mail_to_pairs, MailError 
from website.generate_pairs.routes import limiter api = Blueprint('api',", "Blueprint, request from website.database.DB import SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount", "= DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count:", "= RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user):", "return jsonify({'Message': 'Your pairs were created!'}), 200 return abort(403, description='You cannot do this')", "is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result = request.get_json() draw_id", "def delete_results(user): result = request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query:", "deleted!'}), 200 return jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day')", "with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}), 200 return abort(403, description='You", "first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete()", "= SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in 
user_results: user_random_pairs = RandomPair(", "draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your", "if query: with SessionContextManager() as session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return", "= Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count", "<reponame>marcinkaczmarek10/pairs_generator_flask<filename>website/API/generate_pairs.py import operator import itertools from flask import jsonify, abort, Blueprint, request from", "no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result = request.get_json() draw_id =", "website.database.DB import SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import", "SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required", "DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email,", "@api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema", "post_generate_pairs(user): req = request.get_json() user_random_people_pool = [ 
Person(person['person_name'], person['person_email']) for person in req", "200 return jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user):", "second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return", "website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter api = Blueprint('api', __name__) @api.route('/results')", "session.delete(row) return jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message': 'There is no result!'}), 404", "SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name,", "from website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter api = Blueprint('api', __name__)", "return jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user):", "= sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return", "limiter api = Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin(", "= operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k, g in", "[list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') 
@token_required def", "= [ Person(person['person_name'], person['person_email']) for person in req ] if len(user_random_people_pool) > 1:", "get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema", "is_draw_count: for [first_person, second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email,", ") with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs", "sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}), 200 return", "jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result =", "= schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g) for", "query: with SessionContextManager() as session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message':", "jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email', methods=['POST'])", "MailError: return jsonify({'message': 'Could not send mails!'}), 500 return jsonify({'message': 'There are no", "from website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter", "jsonify({'message': 'Pair deleted!'}), 200 return 
jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE'])", "operator import itertools from flask import jsonify, abort, Blueprint, request from website.database.DB import", "def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs),", "= request.get_json() user_random_people_pool = [ Person(person['person_name'], person['person_email']) for person in req ] if", "def delete_pair(user): pair = request.get_json() pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query:", "RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result =", "send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query)", "RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema from", "if query: for row in query: with SessionContextManager() as session: session.delete(row) return jsonify({'message':", "UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema", "api = Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount,", "query: with SessionContextManager() as session: session.delete(row) return jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message':", "] if len(user_random_people_pool) > 1: user_results = 
generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager()", "if is_draw_count: for [first_person, second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name,", "import jsonify, abort, Blueprint, request from website.database.DB import SessionFactory, SessionContextManager from website.database.models import", "@token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True)", "= RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs)", "def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all()", "return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True)", "= SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query: with SessionContextManager() as session: session.delete(row)", "for person in req ] if len(user_random_people_pool) > 1: user_results = generate_random_pairs(user_random_people_pool) draw_count", "request from website.database.DB import SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount from", "delete_pair(user): pair = request.get_json() pair_id = pair['pair'] query = 
SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with", "operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result,", "SessionContextManager() as session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message': 'There is", "SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}), 200 return abort(403, description='You cannot do", "session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message': 'There is no pair!'}), 404", "= request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in", "as session: session.delete(row) return jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message': 'There is no", "request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query) if query: try:", "return jsonify({'message': 'Emails sent!'}), 200 except MailError: return jsonify({'message': 'Could not send mails!'}),", "jsonify({'message': 'Emails sent!'}), 200 except MailError: return jsonify({'message': 'Could not send mails!'}), 500", "website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs, MailError", "methods=['DELETE']) @token_required def delete_results(user): result = request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all()", "methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool = [ 
Person(person['person_name'], person['person_email']) for", "with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were", "item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema =", "itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema", "'Result deleted!'}), 200 return jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required", "@api.route('/delete-results', methods=['DELETE']) @token_required def delete_results(user): result = request.get_json() draw_id = result['draw_count'] query =", "jsonify({'Message': 'Your pairs were created!'}), 200 return abort(403, description='You cannot do this') @api.route('/delete-pair',", "SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200", "delete_results(user): result = request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for", "user_random_people_pool = [ Person(person['person_name'], person['person_email']) for person in req ] if len(user_random_people_pool) >", "RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter", "as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( 
DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for [first_person, second_person] in", "== DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter =", "no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query", "schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def", "SessionContextManager() as session: session.delete(row) return jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message': 'There is", "website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required", "get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200", "cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): pair = request.get_json() pair_id =", "pairs were created!'}), 200 return abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required", "sent!'}), 200 except MailError: return jsonify({'message': 'Could not send mails!'}), 500 return jsonify({'message':", "abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): pair = request.get_json()", "for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user):", "session: session.delete(row) return jsonify({'message': 'Result 
deleted!'}), 200 return jsonify({'message': 'There is no result!'}),", "'There is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req =", "if len(user_random_people_pool) > 1: user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as", "200 return abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): pair", "Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending import", "second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message':", "second_person] in user_results: user_random_pairs = RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with", "website.generate_pairs.routes import limiter api = Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query =", "@limiter.limit('20/day') def send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients", "200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool = [ Person(person['person_name'],", "DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, 
RandomPersonSchema from website.utils.login_manager", "from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person from website.utils.data_serializers import ResultSchema, RandomPersonSchema from website.utils.login_manager import", "@token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all() schema = RandomPersonSchema(many=True) user_pairs = schema.dump(query) return", "@token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool = [ Person(person['person_name'], person['person_email']) for person", "@token_required def delete_pair(user): pair = request.get_json() pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if", "jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def send_email_to_chosen(user): req", "from website.database.DB import SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs", "@api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): pair = request.get_json() pair_id = pair['pair'] query =", "= SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True)", "[ Person(person['person_name'], person['person_email']) for person in req ] if len(user_random_people_pool) > 1: user_results", "return abort(403, description='You cannot do this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): pair =", "website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter api", "with SessionContextManager() as 
session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message': 'There", "'Pair deleted!'}), 200 return jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results', methods=['DELETE']) @token_required", "ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result =", "person in req ] if len(user_random_people_pool) > 1: user_results = generate_random_pairs(user_random_people_pool) draw_count =", "jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool = [", "key=item_getter) grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200", "SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}),", "this') @api.route('/delete-pair', methods=['DELETE']) @token_required def delete_pair(user): pair = request.get_json() pair_id = pair['pair'] query", "Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count ==", "pair = request.get_json() pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager()", "schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result = [list(g) for k,", "result = request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row", 
"DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if is_draw_count: for", "request.get_json() user_random_people_pool = [ Person(person['person_name'], person['person_email']) for person in req ] if len(user_random_people_pool)", "request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query:", "MailError from website.generate_pairs.routes import limiter api = Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user):", "= generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query(", "req = request.get_json() user_random_people_pool = [ Person(person['person_name'], person['person_email']) for person in req ]", "schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200 except", "abort, Blueprint, request from website.database.DB import SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair,", "RandomPersonSchema(many=True) user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req", "DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count')", "ResultSchema, RandomPersonSchema from website.utils.login_manager import token_required from website.utils.email_sending import send_mail_to_pairs, MailError from website.generate_pairs.routes", "sessionCM.add(user_random_pairs) with 
SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}), 200 return abort(403,", "SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}), 200 return abort(403, description='You cannot", "methods=['DELETE']) @token_required def delete_pair(user): pair = request.get_json() pair_id = pair['pair'] query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first()", "@token_required def delete_results(user): result = request.get_json() draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if", "query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema =", "SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person", "req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query) if", "DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id == user.id).all() schema = ResultSchema(many=True) pretty_result = schema.dump(query)", "as sessionCM: sessionCM.add(user_random_pairs) with SessionContextManager(): SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete() return jsonify({'Message': 'Your pairs were created!'}), 200", "import send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter api = Blueprint('api', __name__) @api.route('/results') @token_required", "query: for row in query: with SessionContextManager() as session: session.delete(row) return jsonify({'message': 
'Result", "= schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200", "if query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200 except MailError:", "from website.generate_pairs.routes import limiter api = Blueprint('api', __name__) @api.route('/results') @token_required def get_results(user): query", "user_pairs = schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req =", "return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message': 'There is no pair!'}), 404 @api.route('/delete-results',", "= SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session: session.delete(query) return jsonify({'message': 'Pair deleted!'}),", "= result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query: with SessionContextManager()", "= schema.dump(query) return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json()", "req ] if len(user_random_people_pool) > 1: user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with", "import SessionFactory, SessionContextManager from website.database.models import UsersPerson, RandomPair, DrawCount from website.generate_pairs.generate_random_pairs import generate_random_pairs,", "with SessionContextManager() as session: session.delete(row) return jsonify({'message': 'Result deleted!'}), 200 return jsonify({'message': 'There", "session: session.delete(query) return jsonify({'message': 'Pair deleted!'}), 200 return jsonify({'message': 'There is no pair!'}),", "ResultSchema(many=True) recipients = schema.dump(query) if 
query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails", "query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first() if query: with SessionContextManager() as session: session.delete(query) return jsonify({'message': 'Pair", "recipients = schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}),", "send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message': 'Emails sent!'}), 200 except MailError: return jsonify({'message': 'Could", "@api.route('/results') @token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id", "in itertools.groupby(sorted_pretty_result, item_getter)] return jsonify(grouped_result), 200 @api.route('/pairs') @token_required def get_user_pairs(user): query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all()", "return jsonify(user_pairs), 200 @api.route('/generate-pairs', methods=['POST']) @token_required def post_generate_pairs(user): req = request.get_json() user_random_people_pool =", "RandomPair( first_person_name=first_person.name, first_person_email=first_person.email, second_person_name=second_person.name, second_person_email=second_person.email, draw_count=is_draw_count.id ) with SessionContextManager() as sessionCM: sessionCM.add(user_random_pairs) with", "200 return jsonify({'message': 'There is no result!'}), 404 @api.route('/send-email', methods=['POST']) @token_required @limiter.limit('20/day') def", "req['body']) return jsonify({'message': 'Emails sent!'}), 200 except MailError: return jsonify({'message': 'Could not send", "SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first() if 
is_draw_count: for [first_person, second_person]", "= request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients = schema.dump(query) if query:", "schema = ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter)", "query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query: with SessionContextManager() as session:", "1: user_results = generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count", "generate_random_pairs(user_random_people_pool) draw_count = DrawCount(user_id=user.id) with SessionContextManager() as sessionCM: sessionCM.add(draw_count) is_draw_count = SessionFactory.session.query( DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first()", "@token_required def get_results(user): query = SessionFactory.session.query( RandomPair).outerjoin( DrawCount, RandomPair.draw_count == DrawCount.id).filter( DrawCount.user_id ==", "draw_id = result['draw_count'] query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all() if query: for row in query: with", "def send_email_to_chosen(user): req = request.get_json() query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all() schema = ResultSchema(many=True) recipients =", "= ResultSchema(many=True) recipients = schema.dump(query) if query: try: send_mail_to_pairs(recipients, req['title'], req['body']) return jsonify({'message':", "itertools from flask import jsonify, abort, Blueprint, request from website.database.DB import SessionFactory, SessionContextManager", "Person(person['person_name'], person['person_email']) for person in req ] if len(user_random_people_pool) > 1: 
user_results =", "200 except MailError: return jsonify({'message': 'Could not send mails!'}), 500 return jsonify({'message': 'There", "jsonify({'message': 'Could not send mails!'}), 500 return jsonify({'message': 'There are no results'}), 404", "send_mail_to_pairs, MailError from website.generate_pairs.routes import limiter api = Blueprint('api', __name__) @api.route('/results') @token_required def", "'Emails sent!'}), 200 except MailError: return jsonify({'message': 'Could not send mails!'}), 500 return", "= ResultSchema(many=True) pretty_result = schema.dump(query) item_getter = operator.itemgetter('draw_count') sorted_pretty_result = sorted(pretty_result, key=item_getter) grouped_result" ]
[ "threading class MultiThreading(object): def __init__(self, scrapers): self.scrapers = scrapers def run(self): threads =", "scrapers): self.scrapers = scrapers def run(self): threads = [] for i in range(len(self.scrapers)):", "run(self): threads = [] for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start() threads.append(t)", "MultiThreading(object): def __init__(self, scrapers): self.scrapers = scrapers def run(self): threads = [] for", "class MultiThreading(object): def __init__(self, scrapers): self.scrapers = scrapers def run(self): threads = []", "= [] for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start() threads.append(t) for thread", "for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start() threads.append(t) for thread in threads:", "__init__(self, scrapers): self.scrapers = scrapers def run(self): threads = [] for i in", "self.scrapers = scrapers def run(self): threads = [] for i in range(len(self.scrapers)): t", "def __init__(self, scrapers): self.scrapers = scrapers def run(self): threads = [] for i", "import threading class MultiThreading(object): def __init__(self, scrapers): self.scrapers = scrapers def run(self): threads", "= scrapers def run(self): threads = [] for i in range(len(self.scrapers)): t =", "def run(self): threads = [] for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start()", "scrapers def run(self): threads = [] for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start)", "threads = [] for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start() threads.append(t) for", "[] for i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start() threads.append(t) for thread in", "i in range(len(self.scrapers)): t = threading.Thread(target=self.scrapers[i].start) t.start() 
threads.append(t) for thread in threads: thread.join()" ]
[ "from .create_scoped import create_scoped from .with_context import with_context from .with_extras import with_extra, with_extras,", ".create_scoped import create_scoped from .with_context import with_context from .with_extras import with_extra, with_extras, Extras", ".with_extras import with_extra, with_extras, Extras from .with_filter import with_filter, Filter from .with_identities import", "with_context from .with_extras import with_extra, with_extras, Extras from .with_filter import with_filter, Filter from", "with_extra, with_extras, Extras from .with_filter import with_filter, Filter from .with_identities import with_identity, with_identities,", ".with_context import with_context from .with_extras import with_extra, with_extras, Extras from .with_filter import with_filter,", "with_extras, Extras from .with_filter import with_filter, Filter from .with_identities import with_identity, with_identities, Identities", "import with_extra, with_extras, Extras from .with_filter import with_filter, Filter from .with_identities import with_identity,", "from .with_extras import with_extra, with_extras, Extras from .with_filter import with_filter, Filter from .with_identities", "import with_filter, Filter from .with_identities import with_identity, with_identities, Identities from .with_meta import with_meta", "from .with_filter import with_filter, Filter from .with_identities import with_identity, with_identities, Identities from .with_meta", "import with_context from .with_extras import with_extra, with_extras, Extras from .with_filter import with_filter, Filter", ".with_filter import with_filter, Filter from .with_identities import with_identity, with_identities, Identities from .with_meta import", "from .with_context import with_context from .with_extras import with_extra, with_extras, Extras from .with_filter import", "create_scoped from .with_context import with_context from .with_extras import with_extra, with_extras, Extras from .with_filter", "import 
create_scoped from .with_context import with_context from .with_extras import with_extra, with_extras, Extras from", "Extras from .with_filter import with_filter, Filter from .with_identities import with_identity, with_identities, Identities from" ]
[ "for i in range(1, n + 1): sum_of_digits = 0 for cd in", "n + 1): sum_of_digits = 0 for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd])", "0 for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5) or", "range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits == 7) or", "if (sum_of_digits == 5) or (sum_of_digits == 7) or (sum_of_digits == 11): print(f'{i}", "= int(input()) for i in range(1, n + 1): sum_of_digits = 0 for", "and variables LAB/special numbers.py n = int(input()) for i in range(1, n +", "<filename>data types and variables LAB/special numbers.py n = int(input()) for i in range(1,", "variables LAB/special numbers.py n = int(input()) for i in range(1, n + 1):", "= 0 for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5)", "sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits == 7) or (sum_of_digits", "1): sum_of_digits = 0 for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits", "int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits == 7) or (sum_of_digits == 11):", "(sum_of_digits == 5) or (sum_of_digits == 7) or (sum_of_digits == 11): print(f'{i} ->", "(sum_of_digits == 7) or (sum_of_digits == 11): print(f'{i} -> True') else: print(f'{i} ->", "numbers.py n = int(input()) for i in range(1, n + 1): sum_of_digits =", "n = int(input()) for i in range(1, n + 1): sum_of_digits = 0", "+= int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits == 7) or (sum_of_digits ==", "in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits == 7)", "or (sum_of_digits == 7) or (sum_of_digits == 11): print(f'{i} -> True') else: print(f'{i}", "types and variables LAB/special numbers.py n = int(input()) for i in range(1, n", "sum_of_digits = 0 for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits ==", "5) or (sum_of_digits == 7) or 
(sum_of_digits == 11): print(f'{i} -> True') else:", "in range(1, n + 1): sum_of_digits = 0 for cd in range(len(str(i))): sum_of_digits", "== 7) or (sum_of_digits == 11): print(f'{i} -> True') else: print(f'{i} -> False')", "int(input()) for i in range(1, n + 1): sum_of_digits = 0 for cd", "+ 1): sum_of_digits = 0 for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if", "== 5) or (sum_of_digits == 7) or (sum_of_digits == 11): print(f'{i} -> True')", "cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits ==", "range(1, n + 1): sum_of_digits = 0 for cd in range(len(str(i))): sum_of_digits +=", "LAB/special numbers.py n = int(input()) for i in range(1, n + 1): sum_of_digits", "i in range(1, n + 1): sum_of_digits = 0 for cd in range(len(str(i))):", "for cd in range(len(str(i))): sum_of_digits += int(str(i)[cd]) if (sum_of_digits == 5) or (sum_of_digits" ]
[ "reserve\" or info[0] == \"Reserve not met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "based on the index of buy now or reserve price derived from source", "or info[0] == \"Reserve not met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title,", "#>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return", "an int of the price #>> reserve and buy now values are returned", "\"Reserve not met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation", "- {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title", "#>> returns an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km =", "info[0] == \"Reserve not met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "info string and splits into list format #>> get_price(): based on the index", "based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the", "price derived from source code, removes $ and returns an int of the", "0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range of variables to be processed", "self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the vehicle obtained", "the vehicle obtained from the title as an int #>> includes hard coded", 
"this class creates an instance of each vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and model of the vehicle #>> includes", "Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range of variables", "returned based on the listing type #>> includes hard coded values based on", "None elif self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing", "title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self): return f\"{self.title} - {self.odometer} -", "return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of the vehicle #>>", "def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and model of the", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if len(title_list) == 3: return title_list[1] +", "vehicle obtained from the title as an int #>> includes hard coded values", "listing type #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "of the vehicle #>> based on preference #>> includes hard coded values based", "the vehicle #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "the listing type #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "\").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return get_price(1), get_price(3) elif", "vehicle title, odometer and price info 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer}", "#>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list", "num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range of variables to", "get_price(3) elif self.listing_type == \"auction2\": return get_price(1), None elif self.listing_type == \"classified\": return", "title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the", "4: return \"auction1\" elif len(info) == 2: if info[0] == \"Reserve met\" or", "type of the vehicle #>> based on preference #>> includes hard coded values", "string and splits into list format #>> get_price(): based on the index of", "next(iter(title_list)) for title in iter(title_list): make_model += title + \" \" return make_model.strip()", "info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of", "self.search_link = link self.year = self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type", "or info[0] == \"No reserve\" or info[0] == \"Reserve not met\": return \"auction2\"", "findPrice(self): price_info_list = self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def", "#>> returns a representation of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "len(title_list) == 3: return title_list[1] + title_list[2] make_model = \"\" next(iter(title_list)) for title", "= self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, 
reserve_price(return_arg=1) and buy_now_price(return_arg=2)", "has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method", "\"Reserve met\" or info[0] == \"No reserve\" or info[0] == \"Reserve not met\":", "def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type", "= self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the vehicle obtained from", "else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer", "self.price_info.split(\" \") if len(info) == 4: return \"auction1\" elif len(info) == 2: if", "= desc self.search_link = link self.year = self.findYear() self.make_model = self.findMakeModel() self.kms =", "on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make", "int of the price #>> reserve and buy now values are returned based", "= self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price", "and buy now values are returned based on the listing type #>> includes", "two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type #>> remove_delimiters(): removes", "\" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) 
if self.listing_type == \"auction1\": return get_price(1), get_price(3)", "representation of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self): return", "- {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "periods from the the price info string and splits into list format #>>", "not met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of", "findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1)", "of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self): return f\"{self.title}", "== \"Reserve not met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a", "price info string and splits into list format #>> get_price(): based on the", "and periods from the the price info string and splits into list format", "int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return get_price(1), get_price(3) elif self.listing_type == \"auction2\": return", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of the vehicle #>> based on preference", "range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc,", "returns a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "index of buy now or reserve price derived from source code, removes 
$", "== \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of the", "or reserve price derived from source code, removes $ and returns an int", "self.price_info = price_info self.desc = desc self.search_link = link self.year = self.findYear() self.make_model", "parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a", "def __init__(self, title, odometer, price_info, desc, link): self.title = title self.odometer = odometer", "title, odometer, price_info, desc, link): self.title = title self.odometer = odometer self.price_info =", "def findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values,", "self.odometer = odometer self.price_info = price_info self.desc = desc self.search_link = link self.year", "returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type #>> remove_delimiters():", "get_price(1), None elif self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the", "code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if len(title_list) == 3: return title_list[1]", "get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of the vehicle #>> based on", "elif len(info) == 2: if info[0] == \"Reserve met\" or info[0] == \"No", "if self.listing_type == \"auction1\": return get_price(1), get_price(3) elif self.listing_type == \"auction2\": return get_price(1),", "== \"Reserve met\" or info[0] == \"No reserve\" or info[0] == \"Reserve not", "returns a representation of the vehicle title, odometer and price info 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "#>> returns the listing type of the vehicle #>> based on preference #>>", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle", "values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "#>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type #>>", "values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if len(title_list)", "return title_list[1] + title_list[2] make_model = \"\" next(iter(title_list)) for title in iter(title_list): make_model", "int #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self):", "of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title}", "if len(title_list) == 3: return title_list[1] + title_list[2] make_model = \"\" next(iter(title_list)) for", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a", "make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self):", "= price_info self.desc = desc self.search_link = link self.year = self.findYear() self.make_model =", "the make and model of the vehicle #>> includes hard coded values based", "return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> 
returns the make and model of the vehicle #>>", "the price #>> reserve and buy now values are returned based on the", "reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type #>> remove_delimiters(): removes the commas", "returns an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2]", "== \"auction2\": return get_price(1), None elif self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the", "includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0])", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the vehicle obtained from the title as", "on the listing type #>> remove_delimiters(): removes the commas and periods from the", "title in iter(title_list): make_model += title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "values are returned based on the listing type #>> includes hard coded values", "from the title as an int #>> includes hard coded values based on", "includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info =", "code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\",", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km", "return int(without_km.replace(\",\", \"\")) 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on", "based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if len(title_list) ==", "\") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if", "an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2] return", "== \"auction1\": return get_price(1), get_price(3) elif self.listing_type == \"auction2\": return get_price(1), None elif", "representation of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return", "self.desc = desc self.search_link = link self.year = self.findYear() self.make_model = self.findMakeModel() self.kms", "on the listing type #>> includes hard coded values based on source code", "derived from source code, removes $ and returns an int of the price", "title_list[2] make_model = \"\" next(iter(title_list)) for title in iter(title_list): make_model += title +", "hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\"", "return get_price(1), None elif self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "self.title.split() if len(title_list) == 3: return title_list[1] + title_list[2] make_model = \"\" next(iter(title_list))", "met\": return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the", "return get_price(1), get_price(3) elif self.listing_type == \"auction2\": return get_price(1), 
None elif self.listing_type ==", "met\" or info[0] == \"No reserve\" or info[0] == \"Reserve not met\": return", "value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\"))", "= title self.odometer = odometer self.price_info = price_info self.desc = desc self.search_link =", "the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation", "on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if len(title_list) == 3:", "\"auction1\" elif len(info) == 2: if info[0] == \"Reserve met\" or info[0] ==", "returns the year of the vehicle obtained from the title as an int", "#>> get_price(): based on the index of buy now or reserve price derived", "def check_listing_type(self): info = self.price_info.split(\" \") if len(info) == 4: return \"auction1\" elif", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and model of the vehicle #>> includes hard", "\"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of the vehicle", "self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of", "return \"auction1\" elif len(info) == 2: if info[0] == \"Reserve met\" or info[0]", "class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range of", "and splits into list format #>> get_price(): based on the index of buy", "variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc, link): self.title", "\"auction2\": return get_price(1), None elif 
self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two", "a representation of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self):", "\" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "info[0] == \"No reserve\" or info[0] == \"Reserve not met\": return \"auction2\" else:", "been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates", "title_list = self.title.split() if len(title_list) == 3: return title_list[1] + title_list[2] make_model =", "= \"\" next(iter(title_list)) for title in iter(title_list): make_model += title + \" \"", "model of the vehicle #>> includes hard coded values based on source code", "type #>> remove_delimiters(): removes the commas and periods from the the price info", "based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \") def remove_delimiters(value):", "elif self.listing_type == \"classified\": return None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type", "None, get_price(1) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the listing type of the vehicle #>> based", "return \"auction2\" else: return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle", "coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 
def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "make_model = \"\" next(iter(title_list)) for title in iter(title_list): make_model += title + \"", "#>> init method creates a range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price =", "a representation of the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self):", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return", "self.year = self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price,", "return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the", "= odometer self.price_info = price_info self.desc = desc self.search_link = link self.year =", "price_info self.desc = desc self.search_link = link self.year = self.findYear() self.make_model = self.findMakeModel()", "of the vehicle #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "now or reserve price derived from source code, removes $ and returns an", "odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type #>> remove_delimiters(): removes the", "def findPrice(self): price_info_list = self.price_info.split(\" 
\") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split()", "listing type #>> remove_delimiters(): removes the commas and periods from the the price", "based on the listing type #>> remove_delimiters(): removes the commas and periods from", "commas and periods from the the price info string and splits into list", "returns the listing type of the vehicle #>> based on preference #>> includes", "and buy_now_price(return_arg=2) based on the listing type #>> remove_delimiters(): removes the commas and", "reserve and buy now values are returned based on the listing type #>>", "instance of each vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles =", "make_model += title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer", "returns the make and model of the vehicle #>> includes hard coded values", "a range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info,", "code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\" \") if len(info) == 4: return", "title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer} -", "int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the", "from the the price info string and splits into list format #>> get_price():", "processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc, link): self.title = title self.odometer", "format #>> get_price(): based on the index of buy now or reserve price", "#>> based on preference #>> includes hard coded values based on source code", "\" \" 
return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of the odometer", "title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of", "the commas and periods from the the price info string and splits into", "creates a range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer,", "hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "of each vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0", "self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of", "self.title = title self.odometer = odometer self.price_info = price_info self.desc = desc self.search_link", "self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles +=", "return \"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer and", "\"No reserve\" or info[0] == \"Reserve not met\": return \"auction2\" else: return \"classified\"", "#>> this class creates an instance of each vehicle that has been parsed", "the price info string and splits into list format #>> get_price(): based on", "vehicle #>> based on preference #>> includes hard coded values based on source", "{self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self):", "\"auction2\" else: return \"classified\" 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title,", "of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a", "the listing type #>> remove_delimiters(): removes the commas and periods from the the", "list format #>> get_price(): based on the index of buy now or reserve", "splits into list format #>> get_price(): based on the index of buy now", "an instance of each vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles", "+ \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of the", "price #>> reserve and buy now values are returned based on the listing", "__init__(self, title, odometer, price_info, desc, link): self.title = title self.odometer = odometer self.price_info", "on preference #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "code, removes $ and returns an int of the price #>> reserve and", "== 2: if info[0] == \"Reserve met\" or info[0] == \"No reserve\" or", "\"\" next(iter(title_list)) for title in iter(title_list): make_model += title + \" \" return", "price_info, desc, link): self.title = title self.odometer = odometer self.price_info = price_info self.desc", "for title in iter(title_list): make_model += title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self): return f\"{self.title} -", "based on preference #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1) and", "+= title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value", "into list format #>> get_price(): based on the index of buy now or", "init method creates a range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self,", "odometer self.price_info = price_info self.desc = desc self.search_link = link self.year = self.findYear()", "== \"No reserve\" or info[0] == \"Reserve not met\": return \"auction2\" else: return", "\") if len(info) == 4: return \"auction1\" elif len(info) == 2: if info[0]", "+ title_list[2] make_model = \"\" next(iter(title_list)) for title in iter(title_list): make_model += title", "coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if", "remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type ==", "desc self.search_link = link self.year = self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS()", "removes $ and returns an int of the price #>> reserve and buy", "#>> returns the make and model of the vehicle #>> includes hard coded", "= self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return", "buy now values are returned based on the listing type #>> includes hard", "title self.odometer = odometer self.price_info = price_info self.desc = desc self.search_link = link", "make and model of the vehicle #>> includes hard coded values based on", "#>> returns a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def 
title_info(self): return f\"{self.title}\"", "to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc, link): self.title =", "the vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} -", "the listing type of the vehicle #>> based on preference #>> includes hard", "now values are returned based on the listing type #>> includes hard coded", "includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list =", "hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split()", "= self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles", "self.listing_type == \"auction2\": return get_price(1), None elif self.listing_type == \"classified\": return None, get_price(1)", "includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list =", "source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self): title_list = self.title.split() if len(title_list) == 3: return", "values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \") def", "are returned based on the listing type #>> includes hard coded values based", "vehicle #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findMakeModel(self):", "= self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the vehicle", "values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def 
check_listing_type(self): info = self.price_info.split(\" \") if", "\"classified\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer and price", "odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\"", "odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self): return f\"{self.title} - {self.odometer} - {self.price_info}\"", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type", "in iter(title_list): make_model += title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "{self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "3: return title_list[1] + title_list[2] make_model = \"\" next(iter(title_list)) for title in iter(title_list):", "= link self.year = self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type =", "+= 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the vehicle obtained from the", "of the vehicle obtained from the title as an int #>> includes hard", "the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init", "vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of", "the year of the vehicle obtained from the title as an int #>>", "price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "= self.title.split() if len(title_list) == 3: return title_list[1] + title_list[2] make_model = \"\"", "and model of the vehicle #>> includes hard coded values based on source", "= self.price_info.split(\" \") if len(info) == 4: return \"auction1\" elif len(info) == 2:", "source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\" \") if len(info) == 4:", "coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \")", "year of the vehicle obtained from the title as an int #>> includes", "type #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self):", "return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\":", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> this class creates an instance of each vehicle that has been", "remove_delimiters(): removes the commas and periods from the the price info string and", "$ and returns an int of the price #>> reserve and buy now", "f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle", "self.check_listing_type() 
self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year", "#>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info", "obtained from the title as an int #>> includes hard coded values based", "link): self.title = title self.odometer = odometer self.price_info = price_info self.desc = desc", "price_info_list = self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price):", "info = self.price_info.split(\" \") if len(info) == 4: return \"auction1\" elif len(info) ==", "preference #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self):", "return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return get_price(1), get_price(3) elif self.listing_type == \"auction2\":", "#>> returns the year of the vehicle obtained from the title as an", "source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\",", "title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer", "class creates an instance of each vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class", "value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return", "of the price #>> reserve and buy now values are returned based on", "of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def 
findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "if info[0] == \"Reserve met\" or info[0] == \"No reserve\" or info[0] ==", "len(info) == 4: return \"auction1\" elif len(info) == 2: if info[0] == \"Reserve", "method creates a range of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title,", "source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and", "returns an int of the price #>> reserve and buy now values are", "based on the listing type #>> includes hard coded values based on source", "get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return get_price(1), get_price(3) elif self.listing_type ==", "== 3: return title_list[1] + title_list[2] make_model = \"\" next(iter(title_list)) for title in", "be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc, link): self.title = title", "self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice()", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and model of", "self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "creates an instance of each vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle:", "elif self.listing_type == \"auction2\": return get_price(1), 
None elif self.listing_type == \"classified\": return None,", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \"", "info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc, link): self.title = title self.odometer =", "the vehicle #>> based on preference #>> includes hard coded values based on", "get_price(1), get_price(3) elif self.listing_type == \"auction2\": return get_price(1), None elif self.listing_type == \"classified\":", "and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and model", "buy now or reserve price derived from source code, removes $ and returns", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer and price info", "the title as an int #>> includes hard coded values based on source", "a representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "desc, link): self.title = title self.odometer = odometer self.price_info = price_info self.desc =", "as an int #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "odometer, price_info, desc, link): self.title = 
title self.odometer = odometer self.price_info = price_info", "len(info) == 2: if info[0] == \"Reserve met\" or info[0] == \"No reserve\"", "vehicle title, odometer and price info #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __str__(self): return f\"{self.title} - {self.odometer}", "def info(self): return f\"{self.title} - {self.odometer} - {self.price_info}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation", "= self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the", "hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\"", "vehicle that has been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "self.kms = self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1", "\"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing", "title as an int #>> includes hard coded values based on source code", "the the price info string and splits into list format #>> get_price(): based", "of variables to be processed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def __init__(self, title, odometer, price_info, desc, link):", "def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return get_price(1), get_price(3) elif self.listing_type", "findMakeModel(self): title_list = self.title.split() if len(title_list) == 3: return title_list[1] + title_list[2] make_model", "return make_model.strip() 
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "source code, removes $ and returns an int of the price #>> reserve", "check_listing_type(self): info = self.price_info.split(\" \") if len(info) == 4: return \"auction1\" elif len(info)", "self.price_info.split(\" \") def remove_delimiters(value): return value.replace(\",\", \"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0])", "integer value of the odometer #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findKMS(self): without_km = self.odometer[:-2] return int(without_km.replace(\",\",", "link self.year = self.findYear() self.make_model = self.findMakeModel() self.kms = self.findKMS() self.listing_type = self.check_listing_type()", "findYear(self): return int(self.title.split()[0]) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the make and model of the vehicle", "from source code, removes $ and returns an int of the price #>>", "based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\" \") if len(info)", "representation of the vehicle title #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def title_info(self): return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns", "buy_now_price(return_arg=2) based on the listing type #>> remove_delimiters(): removes the commas and periods", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\" \") if len(info) == 4: return \"auction1\"", "on the index of buy now or reserve price derived from source code,", "return f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer and", "each vehicle that has 
been parsed #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", "2: if info[0] == \"Reserve met\" or info[0] == \"No reserve\" or info[0]", "self.findKMS() self.listing_type = self.check_listing_type() self.reserve_price, self.buy_now_price = self.findPrice() Vehicle.num_vehicles += 1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>>", "on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\" \") if len(info) ==", "on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list = self.price_info.split(\" \") def remove_delimiters(value): return", "iter(title_list): make_model += title + \" \" return make_model.strip() #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns an", "if len(info) == 4: return \"auction1\" elif len(info) == 2: if info[0] ==", "\"auction1\": return get_price(1), get_price(3) elif self.listing_type == \"auction2\": return get_price(1), None elif self.listing_type", "#>> reserve and buy now values are returned based on the listing type", "listing type of the vehicle #>> based on preference #>> includes hard coded", "self.listing_type == \"auction1\": return get_price(1), get_price(3) elif self.listing_type == \"auction2\": return get_price(1), None", "and returns an int of the price #>> reserve and buy now values", "= 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range of variables to be", "get_price(): based on the index of buy now or reserve price derived from", "def findMakeModel(self): title_list = self.title.split() if len(title_list) == 3: return title_list[1] + title_list[2]", "an int #>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def", "1 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns the year of the 
vehicle obtained from the title", "removes the commas and periods from the the price info string and splits", "#>> remove_delimiters(): removes the commas and periods from the the price info string", "title_list[1] + title_list[2] make_model = \"\" next(iter(title_list)) for title in iter(title_list): make_model +=", "info[0] == \"Reserve met\" or info[0] == \"No reserve\" or info[0] == \"Reserve", "the index of buy now or reserve price derived from source code, removes", "coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def check_listing_type(self): info = self.price_info.split(\" \")", "self.odometer[:-2] return int(without_km.replace(\",\", \"\")) #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based", "f\"{self.title}\" #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> returns a representation of the vehicle title, odometer and price", "of buy now or reserve price derived from source code, removes $ and", "#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> class Vehicle: num_vehicles = 0 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #>> init method creates a range", "reserve price derived from source code, removes $ and returns an int of", "\"\").replace(\".\", \" \").split() def get_price(index_of_price): return int(remove_delimiters((price_info_list[index_of_price])[1:])[0]) if self.listing_type == \"auction1\": return get_price(1),", "== 4: return \"auction1\" elif len(info) == 2: if info[0] == \"Reserve met\"", "#>> includes hard coded values based on source code #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> def findPrice(self): price_info_list" ]
[ "from pypeerassets.kutil import Kutil from pypeerassets.networks import net_query from pypeerassets.provider import Provider def", "import Kutil from pypeerassets.networks import net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int)", "be converted to TxOut object before signing''' index = utxo.txout # utxo index", "ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, )", "def make_raw_transaction( network: str, inputs: list, outputs: list, locktime: Locktime, timestamp: int=int(time()), version:", "from tx size in bytes''' min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size /", "from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import (", "list, outputs: list, locktime: Locktime, timestamp: int=int(time()), version: int=1, ) -> MutableTransaction: '''create", "import ( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx,", "return P2pkhScript(addr) def tx_output(network: str, value: Decimal, n: int, script: ScriptSig) -> TxOut:", "make_raw_transaction( network: str, inputs: list, outputs: list, locktime: Locktime, timestamp: int=int(time()), version: int=1,", "Decimal(0.01) # minimum return Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data: bytes) ->", "timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants,", "utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) ->", "MutableTransaction: '''create raw 
transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp,", "signing''' index = utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider,", "MutableTransaction, key: Kutil) -> Transaction: '''sign transaction with Kutil''' parent_output = find_parent_outputs(provider, unsigned_tx.ins[0])", "Decimal from math import ceil from time import time from btcpy.structs.address import Address", "import Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee from tx size", "import time from btcpy.structs.address import Address from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig,", "calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee from tx size in bytes''' min_fee", "minimum return Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create", "-> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address)", "TxOut: '''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n,", "btcpy.structs.address import Address from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from", "network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants,", "outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due to", "to TxOut object before signing''' index = utxo.txout # utxo index return 
TxOut.from_json(provider.getrawtransaction(utxo.txid,", "pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def", "btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import ( Locktime,", "utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key:", "script_pubkey=script) def make_raw_transaction( network: str, inputs: list, outputs: list, locktime: Locktime, timestamp: int=int(time()),", "script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str,", ") def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due to design of the", "import net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx", "outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def", "design of the btcpy library, TxIn object must be converted to TxOut object", "'''create nulldata (OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str, address:", "str, address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr", "Decimal, n: int, script: ScriptSig) -> TxOut: '''create TxOut object''' network_params = net_query(network)", "Kutil from pypeerassets.networks import net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) ->", "size in bytes''' min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size / 
1000) *", "Decimal: '''return tx fee from tx size in bytes''' min_fee = Decimal(0.01) #", "'''create raw transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs,", "object must be converted to TxOut object before signing''' index = utxo.txout #", "'''return tx fee from tx size in bytes''' min_fee = Decimal(0.01) # minimum", "bytes) -> NulldataScript: '''create nulldata (OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def", "address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr =", "str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants,", "before signing''' index = utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider:", "= Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str, value: Decimal, n: int, script:", "time import time from btcpy.structs.address import Address from btcpy.structs.script import ( NulldataScript, P2pkhScript,", "PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs,", "Address from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import", "MutableTransaction, Transaction, TxIn, TxOut, ) from pypeerassets.kutil import Kutil from pypeerassets.networks import net_query", "stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str, address: str) -> P2pkhScript: '''create", "-> MutableTransaction: '''create raw 
transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version,", "network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version,", "P2pkhScript(addr) def tx_output(network: str, value: Decimal, n: int, script: ScriptSig) -> TxOut: '''create", "version: int=1, ) -> MutableTransaction: '''create raw transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"):", "script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str, address: str) -> P2pkhScript:", "import Decimal from math import ceil from time import time from btcpy.structs.address import", "import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from pypeerassets.kutil import Kutil", "p2pkh_script(network: str, address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network)", "ceil from time import time from btcpy.structs.address import Address from btcpy.structs.script import (", "Kutil) -> Transaction: '''sign transaction with Kutil''' parent_output = find_parent_outputs(provider, unsigned_tx.ins[0]) return key.sign_transaction(parent_output,", "= net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str, value: Decimal,", "TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs: list, outputs:", "fee from tx size in bytes''' min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size", "StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: 
str, address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH)", "pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee from tx", "str, inputs: list, outputs: list, locktime: Locktime, timestamp: int=int(time()), version: int=1, ) ->", "/ 1000) * min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return) script'''", "locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider:", "key: Kutil) -> Transaction: '''sign transaction with Kutil''' parent_output = find_parent_outputs(provider, unsigned_tx.ins[0]) return", "( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction,", "TxIn, TxOut, ) from pypeerassets.kutil import Kutil from pypeerassets.networks import net_query from pypeerassets.provider", "n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs: list, outputs: list, locktime: Locktime, timestamp:", "sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign transaction with Kutil''' parent_output", "P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn,", "network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str, value:", "the btcpy library, TxIn object must be converted to TxOut object before signing'''", "-> Transaction: '''sign transaction with Kutil''' parent_output = find_parent_outputs(provider, unsigned_tx.ins[0]) return key.sign_transaction(parent_output, unsigned_tx)", "( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from 
pypeerassets.kutil import Kutil from", "'''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr)", "min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return) script''' stack = StackData.from_bytes(data)", "int, script: ScriptSig) -> TxOut: '''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants,", "transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime,", "value: Decimal, n: int, script: ScriptSig) -> TxOut: '''create TxOut object''' network_params =", "PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from pypeerassets.kutil import Kutil from pypeerassets.networks import", "find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due to design of the btcpy library,", "Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from pypeerassets.kutil import Kutil from pypeerassets.networks", "ScriptSig) -> TxOut: '''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value *", "converted to TxOut object before signing''' index = utxo.txout # utxo index return", "# minimum return Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data: bytes) -> NulldataScript:", "Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return)", "(OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str, address: str) ->", "tx size in bytes''' min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size / 1000)", "utxo: TxIn) -> TxOut: '''due to design of the btcpy 
library, TxIn object", "if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction(", "Locktime, timestamp: int=int(time()), version: int=1, ) -> MutableTransaction: '''create raw transaction''' network_params =", "-> Decimal: '''return tx fee from tx size in bytes''' min_fee = Decimal(0.01)", "TxOut object before signing''' index = utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index])", "return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs,", "str, value: Decimal, n: int, script: ScriptSig) -> TxOut: '''create TxOut object''' network_params", "int) -> Decimal: '''return tx fee from tx size in bytes''' min_fee =", "version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime,", "TxIn) -> TxOut: '''due to design of the btcpy library, TxIn object must", "<gh_stars>0 '''transaction assembly/dissasembly''' from decimal import Decimal from math import ceil from time", "TxOut: '''due to design of the btcpy library, TxIn object must be converted", "btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from pypeerassets.kutil import", "min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data:", "def p2pkh_script(network: str, address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params =", "-> TxOut: '''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, 
value=int(value * network_params.denomination),", "object before signing''' index = utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def", "-> TxOut: '''due to design of the btcpy library, TxIn object must be", "must be converted to TxOut object before signing''' index = utxo.txout # utxo", "network: str, inputs: list, outputs: list, locktime: Locktime, timestamp: int=int(time()), version: int=1, )", "-> NulldataScript: '''create nulldata (OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network:", "of the btcpy library, TxIn object must be converted to TxOut object before", "decimal import Decimal from math import ceil from time import time from btcpy.structs.address", "return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign", "unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign transaction with Kutil''' parent_output = find_parent_outputs(provider,", "* network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs: list, outputs: list, locktime:", "from btcpy.structs.address import Address from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData, )", "bytes''' min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size / 1000) * min_fee) def", "import Address from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction", "int=int(time()), version: int=1, ) -> MutableTransaction: '''create raw transaction''' network_params = net_query(network) if", "'''transaction assembly/dissasembly''' from decimal import Decimal from math import ceil from time import", "return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, 
) def find_parent_outputs(provider: Provider, utxo: TxIn)", "= utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction,", "from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee from", "ScriptSig, StackData, ) from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut,", "def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return) script''' stack = StackData.from_bytes(data) return", "index = utxo.txout # utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx:", "# utxo index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil)", "tx_output(network: str, value: Decimal, n: int, script: ScriptSig) -> TxOut: '''create TxOut object'''", "outputs: list, locktime: Locktime, timestamp: int=int(time()), version: int=1, ) -> MutableTransaction: '''create raw", "locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due to design", "net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs:", "btcpy library, TxIn object must be converted to TxOut object before signing''' index", "import ceil from time import time from btcpy.structs.address import Address from btcpy.structs.script import", "* min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return) script''' stack =", "timestamp: int=int(time()), version: int=1, ) -> MutableTransaction: '''create raw transaction''' network_params = 
net_query(network)", "network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs: list, outputs: list, locktime: Locktime,", "from pypeerassets.networks import net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) -> Decimal:", "= net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, )", "TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script) def", "in bytes''' min_fee = Decimal(0.01) # minimum return Decimal(ceil(tx_size / 1000) * min_fee)", "net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str, value: Decimal, n:", "= StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str, address: str) -> P2pkhScript: '''create pay-to-key-hash", "StackData, ) from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, )", "1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign transaction with", "Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee from tx size in", "index return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction:", "pypeerassets.kutil import Kutil from pypeerassets.networks import net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size:", "= net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, 
script_pubkey=script) def make_raw_transaction( network: str,", "def tx_output(network: str, value: Decimal, n: int, script: ScriptSig) -> TxOut: '''create TxOut", "string=address) return P2pkhScript(addr) def tx_output(network: str, value: Decimal, n: int, script: ScriptSig) ->", "locktime: Locktime, timestamp: int=int(time()), version: int=1, ) -> MutableTransaction: '''create raw transaction''' network_params", "= Decimal(0.01) # minimum return Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data: bytes)", "nulldata (OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str, address: str)", "return Decimal(ceil(tx_size / 1000) * min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata", "Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str, value: Decimal, n: int, script: ScriptSig)", "list, locktime: Locktime, timestamp: int=int(time()), version: int=1, ) -> MutableTransaction: '''create raw transaction'''", "def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee from tx size in bytes'''", "network=network_params.btcpy_constants, ) return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider,", "library, TxIn object must be converted to TxOut object before signing''' index =", "n: int, script: ScriptSig) -> TxOut: '''create TxOut object''' network_params = net_query(network) return", "pypeerassets.networks import net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return", "raw transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs,", "from decimal import Decimal from math import ceil 
from time import time from", "def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign transaction with Kutil'''", "network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network:", ") -> MutableTransaction: '''create raw transaction''' network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx(", "def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due to design of the btcpy", "object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction(", "NulldataScript(stack) def p2pkh_script(network: str, address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params", "TxOut, ) from pypeerassets.kutil import Kutil from pypeerassets.networks import net_query from pypeerassets.provider import", "Transaction, TxIn, TxOut, ) from pypeerassets.kutil import Kutil from pypeerassets.networks import net_query from", "inputs: list, outputs: list, locktime: Locktime, timestamp: int=int(time()), version: int=1, ) -> MutableTransaction:", "assembly/dissasembly''' from decimal import Decimal from math import ceil from time import time", "math import ceil from time import time from btcpy.structs.address import Address from btcpy.structs.script", "NulldataScript, P2pkhScript, ScriptSig, StackData, ) from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction,", ") from pypeerassets.kutil import Kutil from pypeerassets.networks import net_query from pypeerassets.provider import Provider", "(P2PKH) script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return 
P2pkhScript(addr) def tx_output(network:", "net_query(network) if network_params.network_name.startswith(\"peercoin\"): return PeercoinMutableTx( version=version, timestamp=timestamp, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) return", "TxIn object must be converted to TxOut object before signing''' index = utxo.txout", "from time import time from btcpy.structs.address import Address from btcpy.structs.script import ( NulldataScript,", "'''due to design of the btcpy library, TxIn object must be converted to", ") from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from", "1000) * min_fee) def nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return) script''' stack", "return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs: list,", ") return MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo:", "tx fee from tx size in bytes''' min_fee = Decimal(0.01) # minimum return", "time from btcpy.structs.address import Address from btcpy.structs.script import ( NulldataScript, P2pkhScript, ScriptSig, StackData,", "to design of the btcpy library, TxIn object must be converted to TxOut", "ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due", "script: ScriptSig) -> TxOut: '''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value", "from btcpy.structs.transaction import ( Locktime, PeercoinMutableTx, MutableTransaction, Transaction, TxIn, TxOut, ) from pypeerassets.kutil", "int=1, ) -> MutableTransaction: '''create raw transaction''' 
network_params = net_query(network) if network_params.network_name.startswith(\"peercoin\"): return", "network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut: '''due to design of", "net_query from pypeerassets.provider import Provider def calculate_tx_fee(tx_size: int) -> Decimal: '''return tx fee", "Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign transaction with Kutil''' parent_output =", "Provider, utxo: TxIn) -> TxOut: '''due to design of the btcpy library, TxIn", "value=int(value * network_params.denomination), n=n, script_pubkey=script) def make_raw_transaction( network: str, inputs: list, outputs: list,", "addr = Address.from_string(network=network_params.btcpy_constants, string=address) return P2pkhScript(addr) def tx_output(network: str, value: Decimal, n: int,", "from math import ceil from time import time from btcpy.structs.address import Address from", "TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index]) def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction, key: Kutil) -> Transaction: '''sign transaction", "NulldataScript: '''create nulldata (OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack) def p2pkh_script(network: str,", "return NulldataScript(stack) def p2pkh_script(network: str, address: str) -> P2pkhScript: '''create pay-to-key-hash (P2PKH) script'''", "P2pkhScript: '''create pay-to-key-hash (P2PKH) script''' network_params = net_query(network) addr = Address.from_string(network=network_params.btcpy_constants, string=address) return", "'''create TxOut object''' network_params = net_query(network) return TxOut(network=network_params.btcpy_constants, value=int(value * network_params.denomination), n=n, script_pubkey=script)", "version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo: TxIn) -> 
TxOut:", "MutableTransaction( version=version, ins=inputs, outs=outputs, locktime=locktime, network=network_params.btcpy_constants, ) def find_parent_outputs(provider: Provider, utxo: TxIn) ->", "nulldata_script(data: bytes) -> NulldataScript: '''create nulldata (OP_return) script''' stack = StackData.from_bytes(data) return NulldataScript(stack)" ]
[ "filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size =", "fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False,", "= plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False", "data = data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False,", "plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False ) plt.tight_layout() plt.savefig(name +", "name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size,", "print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data =", "in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data)))", "np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12,", "numpy as np from matplotlib import pyplot as plt for filename in glob.glob(\"*.dat\"):", "ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False,", "plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False )", "left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False ) plt.tight_layout() plt.savefig(name + 
\".png\") plt.close()", "= int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params(", "filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size, size)) fig,", "int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False,", "glob import numpy as np from matplotlib import pyplot as plt for filename", "as np from matplotlib import pyplot as plt for filename in glob.glob(\"*.dat\"): print(filename)", "size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False,", "import pyplot as plt for filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data", "import glob import numpy as np from matplotlib import pyplot as plt for", "glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data", "import numpy as np from matplotlib import pyplot as plt for filename in", "pyplot as plt for filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data =", "plt for filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\")", "delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12))", "from matplotlib import pyplot as plt for filename in glob.glob(\"*.dat\"): print(filename) name =", "for filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size", "as plt for filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0] data = np.loadtxt(filename,", "matplotlib import pyplot as plt for 
filename in glob.glob(\"*.dat\"): print(filename) name = filename.split(\".\")[0]", "data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False,", "np from matplotlib import pyplot as plt for filename in glob.glob(\"*.dat\"): print(filename) name", "= data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False,", "= filename.split(\".\")[0] data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size, size))", "5.12)) ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False ) plt.tight_layout()", "data = np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax", "size = int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax = plt.subplots(figsize=(5.12, 5.12)) ax.imshow(data)", "bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False ) plt.tight_layout() plt.savefig(name + \".png\")", "= np.loadtxt(filename, delimiter=\",\") size = int(np.sqrt(len(data))) data = data.reshape((size, size)) fig, ax =", "ax.imshow(data) plt.tick_params( bottom=False, left=False, right=False, top=False, labelbottom=False, labelleft=False, labelright=False, labeltop=False ) plt.tight_layout() plt.savefig(name" ]
[ "ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as handle: b = pickle.load(handle) #%%", "6 00:25:27 2017 @author: Wayne \"\"\" import pandas as pd import xgboost as", "data 'lambda ' :4, #L2 regularization term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight':", "label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms, data_tr, num_boost_round=881, evals", "| set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude',", "import numpy as np from sklearn.model_selection import train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False]", "label=z) evallist = [(data_train, 'train')] model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist,", "values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude',", "xgb import numpy as np from sklearn.model_selection import train_test_split import pickle #%% mydf1=", "#%% parms = {'max_depth':14, #maximum depth of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD", "[None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc,", "pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1 Xtest = testdf", "are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do not need to", "= mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1) #%% X = X.drop(['pickup_loc','dropoff_loc'],axis=1) #%%", "train_test_split import 
pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1 Xtest", "print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster", "validation set Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv =", "\"\"\" import pandas as pd import xgboost as xgb import numpy as np", "prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%%", "= kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc],", "= 1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 =", "mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1 =", "not need to worry about missing values.') if train.count().min() == train.shape[0] and test.count().min()", ", label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms, data_tr, num_boost_round=881,", "num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%%", "'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2", "on Sun Aug 6 00:25:27 2017 @author: Wayne \"\"\" import pandas as pd", "Sun Aug 6 00:25:27 2017 @author: Wayne \"\"\" import pandas as pd import", "testdf data_test = xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14,", "\"\"\" Created on Sun Aug 6 
00:25:27 2017 @author: Wayne \"\"\" import pandas", "import train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1", "# -*- coding: utf-8 -*- \"\"\" Created on Sun Aug 6 00:25:27 2017", "= %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train = xgb.DMatrix(X, label=z)", "i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc", "index=False) #%% with open('filename.pickle', 'rb') as handle: b = pickle.load(handle) #%% for d", "maximize=False, verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission =", "'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_')", "early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all the data", "of cpu core to use #%% split training set to validation set Xtrain,", "= [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc],", "test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis =", "#%% training all the data data_train = xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')]", "df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc =", "for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique() == train.shape[0]", "coding: utf-8 -*- 
\"\"\" Created on Sun Aug 6 00:25:27 2017 @author: Wayne", "Kmeans from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values,", "= lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth of a tree 'objective':'reg:linear',", "#%% with open('filename.pickle', 'rb') as handle: b = pickle.load(handle) #%% for d in", "0 else print('oops') print('We do not need to worry about missing values.') if", "1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X =", "model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%% #%% ztest", "= [(data_train, 'train')] model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100)", "sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc']", "= np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc'] =", "worry about missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else", "test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do not", "xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth of", "df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']])", "#Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, 
random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval", "= pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 =", "do not need to worry about missing values.') if train.count().min() == train.shape[0] and", "= mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1) #%% X", "print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops') print('Train and", "MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc']", "need to worry about missing values.') if train.count().min() == train.shape[0] and test.count().min() ==", "= model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv',", "and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do", "lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth of a tree 'objective':'reg:linear', 'eta'", "of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this percentage of data", "open('filename.pickle', 'rb') as handle: b = pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean())", "-*- \"\"\" Created on Sun Aug 6 00:25:27 2017 @author: Wayne \"\"\" import", "#%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%%", "#%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth of a", "use this percentage of data 'lambda ' :4, #L2 regularization term,>1 more 
conservative", "batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] =", "=%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train = xgb.DMatrix(X, label=z) evallist = [(data_train,", ":0.025, 'subsample':0.8,#SGD will use this percentage of data 'lambda ' :4, #L2 regularization", "= train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr =", "for df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude',", "#%% split training set to validation set Xtrain, Xval, Ztrain, Zval = train_test_split(X,", "ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest})", "(mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops') print('Train", "random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val =", "test.id.values))== 0 else print('oops') print('We do not need to worry about missing values.')", "else print('oops') print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans", "training all the data data_train = xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model", "utf-8 -*- \"\"\" Created on Sun Aug 6 00:25:27 2017 @author: Wayne \"\"\"", "'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this percentage of data 'lambda ' :4,", "{'max_depth':14, #maximum depth of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this", "[(data_train, 'train')] model = 
xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%%", "mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1) #%% X =", "= xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms,", "in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%%", "print('We do not need to worry about missing values.') if train.count().min() == train.shape[0]", "verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train =", "== test.shape[0] else print('oops') print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique()))))", "= mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest =", "#maximum depth of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this percentage", "values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag", "data_val = xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')] model =", "#%% for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique() ==", "store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import", "00:25:27 2017 @author: Wayne \"\"\" import pandas as pd import xgboost as xgb", "maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round 
=%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train", "= xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%% #%% ztest =", "= [(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist,", "'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:,", "as handle: b = pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id", "mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%%", "sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do not need", "in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops')", "z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain)", "'train')] model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%% #%%", "#%% print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops') print('Train and test", "kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']])", "verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id':", "data_train = xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model = xgb.train(parms, data_train, num_boost_round=880,", "to validation set Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv", "#L2 regularization term,>1 
more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number", "= xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model = xgb.train(parms, data_train, num_boost_round=880, evals", "'lambda ' :4, #L2 regularization term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10,", "':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of cpu core to use #%%", "if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag has", "np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb')", "handle: b = pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is", "print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train = xgb.DMatrix(X,", "1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1)", "tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this percentage of data 'lambda '", "#%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1 Xtest = testdf data_test", "' :4, #L2 regularization term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread'", "ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with", "== train.shape[0] else print('oops') print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))==", "train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag has only", "= evallist, maximize=False, 
verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1", "np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude',", "set Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval,", "random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr,", "X = mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest) #%% rmse = lambda", "= xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f,", "#number of cpu core to use #%% split training set to validation set", "train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain,", "'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20,", "'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as handle: b = pickle.load(handle)", "xgboost as xgb import numpy as np from sklearn.model_selection import train_test_split import pickle", "mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1) #%%", "mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest)", "pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False] 
mydf1", "'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]=", "data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration))", "[(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30,", "-*- coding: utf-8 -*- \"\"\" Created on Sun Aug 6 00:25:27 2017 @author:", "num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%% ytest", "if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do not need to worry about", "testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf):", "'min_child_weight': 10, 'nthread' :3} #number of cpu core to use #%% split training", "is unique.') if train.id.nunique() == train.shape[0] else print('oops') print('Train and test sets are", "a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this percentage of data 'lambda", "this percentage of data 'lambda ' :4, #L2 regularization term,>1 more conservative 'colsample_bytree", "training set to validation set Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2,", "= pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as handle:", "test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as handle: b =", "= testdf data_test = xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% 
parms =", "set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values,", "print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We", "= 1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X", "(data_val, 'valid')] model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100)", "prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1)", "distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do not need to worry", "= pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z =", "for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_')", "to use #%% split training set to validation set Xtrain, Xval, Ztrain, Zval", "evals = evallist, maximize=False, verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%% ytest =", "mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest", "more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of cpu core", "mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1) #%% X = X.drop(['pickup_loc','dropoff_loc'],axis=1) #%% Xtest=Xtest.drop(['pickup_loc','dropoff_loc'],axis=1)", "#%% mydf1 = 
mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1)", "sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values))", "= xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'), (data_val,", "= mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2))", "'valid')] model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score", "mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind])", "= pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if", "data_test = xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum", "unique.') if train.id.nunique() == train.shape[0] else print('oops') print('Train and test sets are distinct.')", "will use this percentage of data 'lambda ' :4, #L2 regularization term,>1 more", "== train.shape[0] and test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag has only two", "coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000]", 
"evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training", "test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1", "df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']):", "kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc,", "xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')]", "z = np.log(data.trip_duration+1) X = mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest) #%%", "pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration", "testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df", "test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val", "= {'max_depth':14, #maximum depth of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use", "(mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc", "depth of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will use this percentage of", "Zval, 
test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval) evallist", "Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5,", "print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops') print('Train and test sets", "of data 'lambda ' :4, #L2 regularization term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1,", "pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as handle: b", "data data_train = xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model = xgb.train(parms, data_train,", "mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1 Xtest = testdf data_test =", "= pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 =", "np.log(data.trip_duration+1) X = mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest) #%% rmse =", "'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of cpu core to use #%% split", "b = pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.')", "xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model = xgb.train(parms, data_train, num_boost_round=880, evals =", "xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%% #%% ztest = model.predict(data_test)", "model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False)", "#%% #%% ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id,", "evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round 
=%d.'%(model.best_score,model.best_iteration)) #%% training all the", "10, 'nthread' :3} #number of cpu core to use #%% split training set", "testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z", "else print('oops') print('We do not need to worry about missing values.') if train.count().min()", "pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%%", "prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1)", "'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in", "np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans =", "regularization term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of", "all the data data_train = xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model =", "label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')] model", "'subsample':0.8,#SGD will use this percentage of data 'lambda ' :4, #L2 regularization term,>1", "print('oops') print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from", "test.shape[0] else print('oops') print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%%", "pandas 
as pd import xgboost as xgb import numpy as np from sklearn.model_selection", "#%% Kmeans from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude',", "else print('oops') print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else", "Created on Sun Aug 6 00:25:27 2017 @author: Wayne \"\"\" import pandas as", "submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as", "percentage of data 'lambda ' :4, #L2 regularization term,>1 more conservative 'colsample_bytree ':0.9,", "with open('filename.pickle', 'rb') as handle: b = pickle.load(handle) #%% for d in (mydf,testdf):", "d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique() == train.shape[0] else", "data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'),", "to worry about missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0]", "'eta' :0.025, 'subsample':0.8,#SGD will use this percentage of data 'lambda ' :4, #L2", "train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc", "enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1)", "parms = {'max_depth':14, #maximum depth of a tree 'objective':'reg:linear', 'eta' :0.025, 'subsample':0.8,#SGD will", "z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1) #%% X = 
X.drop(['pickup_loc','dropoff_loc'],axis=1)", "split training set to validation set Xtrain, Xval, Ztrain, Zval = train_test_split(X, z,", "import pandas as pd import xgboost as xgb import numpy as np from", "print('oops') print('We do not need to worry about missing values.') if train.count().min() ==", "#%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1", "'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for", "n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train = xgb.DMatrix(X, label=z) evallist =", "kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for i,loc", "cpu core to use #%% split training set to validation set Xtrain, Xval,", "if train.id.nunique() == train.shape[0] else print('oops') print('Train and test sets are distinct.') if", "= train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval ,", "{}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values,", "missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops') print('The", "only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans coords", "test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = 
xgb.DMatrix(Xval , label=Zval) evallist =", "numpy as np from sklearn.model_selection import train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False] z", "rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth of a tree", "@author: Wayne \"\"\" import pandas as pd import xgboost as xgb import numpy", "= xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth", "use #%% split training set to validation set Xtrain, Xval, Ztrain, Zval =", "import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind", "term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of cpu", "Aug 6 00:25:27 2017 @author: Wayne \"\"\" import pandas as pd import xgboost", ":3} #number of cpu core to use #%% split training set to validation", "evallist, maximize=False, verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission", "set to validation set Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1)", "= pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis = 1) #%% mydf1 = mydf1[mydf1['outliers']==False]", "xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round", "= np.log(data.trip_duration+1) X = mydf1 Xtest = testdf data_test = xgb.DMatrix(Xtest) #%% rmse", "Xtest = testdf data_test = xgb.DMatrix(Xtest) #%% rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms", "data_train, num_boost_round=880, evals = evallist, maximize=False, verbose_eval=100) #%% #%% ztest = model.predict(data_test) #%%", "2017 
@author: Wayne \"\"\" import pandas as pd import xgboost as xgb import", "test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) |", "two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans coords =", "pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean()) #%% print('Id is unique.') if train.id.nunique()", "z,zp:np.sqrt(np.mean((z-zp)**2)) #%% parms = {'max_depth':14, #maximum depth of a tree 'objective':'reg:linear', 'eta' :0.025,", "'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of cpu core to use", "prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis =", "#%% train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i]", "Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval,", "train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_val = xgb.DMatrix(Xval , label=Zval)", "print('oops') print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops')", "= np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans", "evallist = [(data_train, 'train')] model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist, maximize=False,", ":4, #L2 regularization 
term,>1 more conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3}", "pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis", "'nthread' :3} #number of cpu core to use #%% split training set to", "%1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all the data data_train = xgb.DMatrix(X, label=z) evallist", "import xgboost as xgb import numpy as np from sklearn.model_selection import train_test_split import", "= MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in (mydf,testdf): df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:,", "#%% ztest = model.predict(data_test) #%% ytest = np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration':", "'rb') as handle: b = pickle.load(handle) #%% for d in (mydf,testdf): print(d.Temp.mean()) #%%", "= np.exp(ztest)-1 submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest}) submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle',", "Wayne \"\"\" import pandas as pd import xgboost as xgb import numpy as", "from sklearn.cluster import MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude',", "submission.to_csv('submission_1.csv', index=False) #%% with open('filename.pickle', 'rb') as handle: b = pickle.load(handle) #%% for", "conservative 'colsample_bytree ':0.9, 'colsample_bylevel':1, 'min_child_weight': 10, 'nthread' :3} #number of cpu core to", "from sklearn.model_selection import train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X", "np from sklearn.model_selection import 
train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1)", "model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score =", "train_loc = pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1", "import pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X = mydf1 Xtest =", "evallist = [(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms, data_tr, num_boost_round=881, evals =", "Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1)", "= evallist, early_stopping_rounds=30, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration)) #%% training all", "xgb.DMatrix(Xval , label=Zval) evallist = [(data_tr, 'train'), (data_val, 'valid')] model = xgb.train(parms, data_tr,", "= kmeans.predict(df[['pickup_latitude', 'pickup_longitude']]) df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']]) #%% train_loc = [None]*2;test_loc=[None]*2 for", "core to use #%% split training set to validation set Xtrain, Xval, Ztrain,", "has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique())))) #%% Kmeans from sklearn.cluster import MiniBatchKMeans", "train.shape[0] else print('oops') print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0", "len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops') print('We do not need to worry about missing", "train_loc = [None]*2;test_loc=[None]*2 for i,loc in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] =", "'train'), (data_val, 'valid')] 
model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist, early_stopping_rounds=30, maximize=False,", "the data data_train = xgb.DMatrix(X, label=z) evallist = [(data_train, 'train')] model = xgb.train(parms,", "in enumerate(['pickup_loc','dropoff_loc']): train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc =", "= pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis = 1)", "and test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique())", "MiniBatchKMeans coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[['dropoff_latitude', 'dropoff_longitude']].values, testdf[['pickup_latitude', 'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind =", "'pickup_longitude']].values, testdf[['dropoff_latitude', 'dropoff_longitude']].values)) sample_ind = np.random.permutation(len(coords))[:500000] kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind]) for df in", "Zval = train_test_split(X, z, test_size=0.2, random_state=1) #Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1) data_tr", "as np from sklearn.model_selection import train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False] z =", "sklearn.model_selection import train_test_split import pickle #%% mydf1= mydf[outliers.outliers==False] z = np.log(data.trip_duration+1) X =", "train.id.nunique() == train.shape[0] else print('oops') print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values,", "pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_') test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_') train_loc = pd.concat(train_loc,axis=1) test_loc =", 
"mydf1[mydf1['outliers']==False] mydf1 = mydf1.drop(['id','outliers'],axis=1) z = mydf1.log_trip_duration X = mydf1.drop(['log_trip_duration'],axis=1) Xtest = testdf1.drop('id',axis=1)", "pd.concat(train_loc,axis=1) test_loc = pd.concat(test_loc,axis=1) #%% mydf1 = pd.concat([mydf,train_loc],axis = 1) testdf1 = pd.concat([testdf,test_loc],axis", "as pd import xgboost as xgb import numpy as np from sklearn.model_selection import", "pd import xgboost as xgb import numpy as np from sklearn.model_selection import train_test_split", "as xgb import numpy as np from sklearn.model_selection import train_test_split import pickle #%%", "train.shape[0] and test.count().min() == test.shape[0] else print('oops') print('The store_and_fwd_flag has only two values", "about missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops')" ]
[ "w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name)", "b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if", "tf import numpy as np import cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels):", "relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"):", "dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as", "relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') 
relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output", "conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME')", "bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\")", "open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1]", "new_conv_layer(self,bottom,filter_shape,name): with 
tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list()", "new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def", "return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label) label_w=tf.reshape(label_w,[-1,1024,1]) conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024]) classmap=tf.matmul(conv6_resized,label_w) classmap=tf.reshape(classmap,[-1,224,224]) return", "self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name):", "def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else:", "conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): 
gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True):", "as np import cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with", "name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return", "return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0))", "dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255.", "relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with", 
"shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False):", "relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2])", "f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME')", "layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name):", "with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): 
conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label) label_w=tf.reshape(label_w,[-1,1024,1])", "b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b))", "def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc", "w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. 
r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1')", "b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope:", "cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:])", "fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0))", "b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. 
r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\")", "return fc def inference(self,rgb,train=False): rgb*=255. r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2')", "tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label) label_w=tf.reshape(label_w,[-1,1024,1]) conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024])", "layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0))", "def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w))", "scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) 
b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as", "fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\")", "cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw))", "conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01))", "pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): 
gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def", "scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\")", "import numpy as np import cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68]", "return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope)", "relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with", "coding: utf-8 -*- import tensorflow as tf import numpy as np import 
cPickle", "pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\")", "bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7))", "relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6):", "__init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def", "fc def inference(self,rgb,train=False): rgb*=255. 
r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\")", "def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return", "layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope:", "tensorflow as tf import numpy as np import cPickle import ipdb class Detector():", "conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.))", "cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) 
b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc", "with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def", "cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope:", "rgb*=255. r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3')", "ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name):", "f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as 
scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name)", "with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim])", "import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def", "as tf import numpy as np import cPickle import ipdb class Detector(): def", "relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4')", "scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) 
conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with", "bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096))", "else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list()", "import tensorflow as tf import numpy as np import cPickle import ipdb class", "-*- import tensorflow as tf import numpy as np import cPickle import ipdb", "gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label)", "cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f)", "with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): 
layer=self.pretrained_weights[layer_name] return", "relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w)", "conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu", "pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\")", "utf-8 -*- import tensorflow as tf import numpy as np import cPickle import", "relu=tf.nn.relu(bias,name=name) 
return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return", "scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name)", "import cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f:", "tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. 
r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\")", "fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return", "f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def", "return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias", "return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def", "r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") 
pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\")", "relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\")", "relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\")", "gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label) 
label_w=tf.reshape(label_w,[-1,1024,1]) conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024]) classmap=tf.matmul(conv6_resized,label_w)", "cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def", "# -*- coding: utf-8 -*- import tensorflow as tf import numpy as np", "relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\") pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\")", "def inference(self,rgb,train=False): rgb*=255. 
r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\")", "def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with", "self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name):", "get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as", "pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label) label_w=tf.reshape(label_w,[-1,1024,1]) conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024]) classmap=tf.matmul(conv6_resized,label_w) classmap=tf.reshape(classmap,[-1,224,224]) return classmap", "tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') 
bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim])", "pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3') relu4_1=self.conv_layer(pool3,\"conv4_1\") relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01))", "-*- coding: utf-8 -*- import tensorflow as tf import numpy as np import", "inference(self,rgb,train=False): rgb*=255. r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3) relu1_1=self.conv_layer(bgr,\"conv1_1\") relu1_2=self.conv_layer(relu1_1,\"conv1_2\") pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1') relu2_1=self.conv_layer(pool1,\"conv2_1\") relu2_2=self.conv_layer(relu2_1,\"conv2_2\") pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2') relu3_1=self.conv_layer(pool2,\"conv3_1\") relu3_2=self.conv_layer(relu3_1,\"conv3_2\") relu3_3=self.conv_layer(relu3_2,\"conv3_3\")", "with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): shape=bottom.get_shape().as_list() dim=np.prod(shape[1:])", 
"relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224])", "relu4_2=self.conv_layer(relu4_1,\"conv4_2\") relu4_3=self.conv_layer(relu4_2,\"conv4_3\") pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4') relu5_1=self.conv_layer(pool4,\"conv5_1\") relu5_2=self.conv_layer(relu5_1,\"conv5_2\") relu5_3=self.conv_layer(relu5_2,\"conv5_3\") conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],\"conv6\") gap=tf.reduce_mean(conv6,[1,2]) with tf.variable_scope(\"GAP\"): gap_w=tf.get_variable(\"W\",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01)) output=tf.matmul(gap,gap_w) return", "fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.))", "return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases)", "with tf.variable_scope(name)as scope: 
w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. r,g,b=tf.split(rgb,3,3) bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3)", "bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b)", "get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return", "tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with", "cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name):", "Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with 
open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return", "numpy as np import cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels", "tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name):", "x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=[output_size],initializer=tf.constant_initializer(0.)) fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope) return fc def inference(self,rgb,train=False): rgb*=255. 
r,g,b=tf.split(rgb,3,3)", "class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name]", "return layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name)", "def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0]", "output=tf.matmul(gap,gap_w) return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output def get_classmap(self,label,conv6): conv6_resized=tf.image.resize_bilinear(conv6,[224,224]) with tf.variable_scope(\"GAP\",reuse=True): label_w=tf.gather(tf.transpose(tf.get_variable(\"W\")),label) label_w=tf.reshape(label_w,[-1,1024,1]) conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024]) classmap=tf.matmul(conv6_resized,label_w) classmap=tf.reshape(classmap,[-1,224,224])", "get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b))", "np import cPickle import ipdb class Detector(): def __init__(self,weight_file_path,n_labels): self.image_mean=[103.939,116.779,123.68] self.n_labels=n_labels with open(weight_file_path)as", "def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: 
w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False):", "if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope: cw=tf.get_variable(\"W\",shape=cw.shape,initializer=tf.constant_initializer(cw)) b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope)", "def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[1] def get_conv_weight(self,name): f=self.get_weight(name)", "b=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope) return fc def new_fc_layer(self,bottom,input_size,output_size,name): shape=bottom.get_shape().to_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01))", "self.n_labels=n_labels with open(weight_file_path)as f: self.pretrained_weights=cPickle.load(f) def get_weight(self,layer_name): layer=self.pretrained_weights[layer_name] return layer[0] def get_bias(self,layer_name): layer=self.pretrained_weights[layer_name]", "layer[1] def get_conv_weight(self,name): f=self.get_weight(name) return f.transpose((2,3,1,0)) def conv_layer(self,bottom,name): with tf.variable_scope(name)as scope: w=self.get_conv_weight(name) b=self.get_bias(name)", "conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def fc_layer(self,bottom,name,create=False): 
shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\":", "w=self.get_conv_weight(name) b=self.get_bias(name) conv_weights=tf.get_variable(\"W\",shape=w.shape,initializer=tf.constant_initializer(w)) conv_biases=tf.get_variable(\"b\",shape=b.shape,initializer=tf.constant_initializer(b)) conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,conv_biases) relu=tf.nn.relu(bias,name=name) return relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as", "shape=bottom.get_shape().as_list() dim=np.prod(shape[1:]) x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with", "relu def new_conv_layer(self,bottom,filter_shape,name): with tf.variable_scope(name)as scope: w=tf.get_variable(\"W\",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01)) b=tf.get_variable(\"b\",shape=filter_shape[-1],initializer=tf.constant_initializer(0.)) conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME') bias=tf.nn.bias_add(conv,b) return bias def", "x=tf.reshape(bottom,[-1,dim]) cw=self.get_weight(name) b=self.get_bias(name) if name==\"fc6\": cw=cw.reshape((4096,512,7,7)) cw=cw.transpose((2,3,1,0)) cw=cw.reshape((25088,4096)) else: cw=cw.transpose((1,0)) with tf.variable_scope(name)as scope:" ]
[ "probe(rep) loss = criterion(outputs, targets) # acc(outputs, targets) # total_loss += loss.item() loss.backward()", "hook on layer for name, module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) #", "loss = criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item() handle.remove() return total_loss /", "= device self.max_epochs = 10 self.verbose = verbose rep = None def _hook(model,", "loss = criterion(outputs, targets) # acc(outputs, targets) # total_loss += loss.item() loss.backward() optimizer.step()", "= verbose rep = None def _hook(model, inp, out): rep = out def", "dequeue trainloader through model and train probe self._ # dequeue valloader through probe", "layer.register_forward_hook(_hook) # dequeue trainloader through model and train probe self._ # dequeue valloader", "10 self.verbose = verbose rep = None def _hook(model, inp, out): rep =", "len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float:", "criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook on layer for", "criterion): for epoch in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for", "LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs = 10 self.verbose =", "torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook", "model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader through model and train probe", "torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs, targets) #", "= probe(rep) loss = criterion(outputs, targets) # acc(outputs, targets) # total_loss 
+= loss.item()", "name, module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader through model", "torch from torch.nn import Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False):", "criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item() handle.remove() return total_loss / len(trainloader), acc.compute().item()", "= 0 for batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device)", "criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook on layer for name, module", "_hook(model, inp, out): rep = out def fit_all(self): pass def _fit_probe(self, model, probe,", "_ = model(inputs) outputs = probe(rep) loss = criterion(outputs, targets) acc(outputs, targets) total_loss", "from torch.nn import Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device", "# def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device)", "= model(inputs) outputs = probe(rep) loss = criterion(outputs, targets) acc(outputs, targets) total_loss +=", "optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs, targets) # acc(outputs, targets) # total_loss", "out): rep = out def fit_all(self): pass def _fit_probe(self, model, probe, train_loader, optimizer,", "valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval() #", "# dequeue valloader through probe and measure loss / acc probe.eval() acc =", "with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device)", "handle = layer.register_forward_hook(_hook) # dequeue trainloader through model and train 
probe self._ #", "(inputs, targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ =", "torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _", "model and train probe self._ # dequeue valloader through probe and measure loss", "= optimizer(probe.parameters(), lr=1e-4) # register hook on layer for name, module in model.named_modules():", "enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _ = model(inputs) outputs = probe(rep) loss", "# register hook on layer for name, module in model.named_modules(): module._forward_hooks.clear() handle =", "_ = model(inputs) optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs, targets) # acc(outputs,", "-> float: global rep model.eval() # def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes),", "= criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook on layer for name,", "measure loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad():", "# acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs, targets) in enumerate(train_loader):", "valloader through probe and measure loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss", "def _hook(model, inp, out): rep = out def fit_all(self): pass def _fit_probe(self, model,", "__init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs = 10 self.verbose = verbose rep", "targets = inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs = probe(rep)", "optimizer(probe.parameters(), lr=1e-4) # register hook on layer for name, module in model.named_modules(): module._forward_hooks.clear()", "inputs, targets = inputs.to(self.device), targets.to(self.device) 
with torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs =", "register hook on layer for name, module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook)", "verbose rep = None def _hook(model, inp, out): rep = out def fit_all(self):", "= torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader):", "global rep model.eval() # def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device)", "= inputs.to(device), targets.to(device) _ = model(inputs) outputs = probe(rep) loss = criterion(outputs, targets)", "device self.max_epochs = 10 self.verbose = verbose rep = None def _hook(model, inp,", "targets.to(device) _ = model(inputs) outputs = probe(rep) loss = criterion(outputs, targets) acc(outputs, targets)", "inputs, targets = inputs.to(device), targets.to(device) _ = model(inputs) outputs = probe(rep) loss =", "print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer, epochs=10,", "<reponame>paulgavrikov/torchbox import torch from torch.nn import Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self,", "enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs", "total_loss += loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\")", "layer for name, module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader", "and measure loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0 with", "for batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device), 
targets.to(self.device) with torch.no_grad():", "= torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4)", "range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs, targets) in", "probe self._ # dequeue valloader through probe and measure loss / acc probe.eval()", "dequeue valloader through probe and measure loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device)", "import torch from torch.nn import Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\",", "outputs = probe(rep) loss = criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item() handle.remove()", "out def fit_all(self): pass def _fit_probe(self, model, probe, train_loader, optimizer, criterion): for epoch", "trainloader through model and train probe self._ # dequeue valloader through probe and", "device=\"cpu\", verbose=False): self.device = device self.max_epochs = 10 self.verbose = verbose rep =", "classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval() # def", "train_loader, optimizer, criterion): for epoch in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss =", "LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs =", "layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval() # def probe", "= probe(rep) loss = criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item() handle.remove() return", "pass def _fit_probe(self, model, probe, train_loader, optimizer, criterion): for epoch in range(self.max_epochs): #", "# total_loss += loss.item() loss.backward() optimizer.step() if self.verbose: 
print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss /", "trainloader, valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval()", "if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes,", "and train probe self._ # dequeue valloader through probe and measure loss /", "import Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device = device", "in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs, targets)", "total_loss = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets", "torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook on", "{epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer, epochs=10, device=-1,", "total_loss = 0 for batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device),", "through model and train probe self._ # dequeue valloader through probe and measure", "self._ # dequeue valloader through probe and measure loss / acc probe.eval() acc", "torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) #", "probe and measure loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0", "optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader,", "inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ = model(inputs) 
optimizer.zero_grad() outputs = probe(rep) loss =", "optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook on layer for name, module in", "on layer for name, module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue", "model(inputs) outputs = probe(rep) loss = criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item()", "inputs.to(device), targets.to(device) _ = model(inputs) outputs = probe(rep) loss = criterion(outputs, targets) acc(outputs,", "Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs", "self.max_epochs = 10 self.verbose = verbose rep = None def _hook(model, inp, out):", "/ acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for batch_idx,", "targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ = model(inputs)", "verbose=False): self.device = device self.max_epochs = 10 self.verbose = verbose rep = None", "optimizer=torch.optim.Adam) -> float: global rep model.eval() # def probe probe = torch.nn.Sequential( torch.nn.Flatten(),", "rep = out def fit_all(self): pass def _fit_probe(self, model, probe, train_loader, optimizer, criterion):", "in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader through model and train", "in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _ = model(inputs) outputs = probe(rep)", "epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval() # def probe probe", "acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for batch_idx, (inputs,", "(inputs, targets) in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _ = 
model(inputs) outputs", "device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval() # def probe probe =", "loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model,", "model(inputs) optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs, targets) # acc(outputs, targets) #", "probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(),", "batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _ = model(inputs)", "# dequeue trainloader through model and train probe self._ # dequeue valloader through", "rep model.eval() # def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion", "self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer,", "= layer.register_forward_hook(_hook) # dequeue trainloader through model and train probe self._ # dequeue", "for name, module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader through", "for batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _ =", "lr=1e-4) # register hook on layer for name, module in model.named_modules(): module._forward_hooks.clear() handle", "torch.nn import Flatten, LazyLinear, Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device =", "fit_all(self): pass def _fit_probe(self, model, probe, train_loader, optimizer, criterion): for epoch in range(self.max_epochs):", "train probe self._ # dequeue valloader through 
probe and measure loss / acc", "through probe and measure loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss =", "/ len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) ->", "criterion(outputs, targets) # acc(outputs, targets) # total_loss += loss.item() loss.backward() optimizer.step() if self.verbose:", "= out def fit_all(self): pass def _fit_probe(self, model, probe, train_loader, optimizer, criterion): for", "module in model.named_modules(): module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader through model and", "= criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item() handle.remove() return total_loss / len(trainloader),", "loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader,", "acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs, targets) in enumerate(train_loader): inputs,", "+= loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}\") def", "targets) # acc(outputs, targets) # total_loss += loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch:", "rep = None def _hook(model, inp, out): rep = out def fit_all(self): pass", "acc(outputs, targets) # total_loss += loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss:", "= 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets =", "def __init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs = 10 self.verbose = verbose", "self.verbose = verbose rep = None def _hook(model, inp, out): rep = out", 
"criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep model.eval() # def probe probe = torch.nn.Sequential(", "probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for batch_idx, (inputs, targets)", "= model(inputs) optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs, targets) # acc(outputs, targets)", "targets) in enumerate(valloader): inputs, targets = inputs.to(device), targets.to(device) _ = model(inputs) outputs =", "0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): inputs, targets = inputs.to(device),", "Softmax class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs = 10", "targets) # total_loss += loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss", "fit_layer(model, trainloader, valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global rep", "class LinearProbe: def __init__(self, device=\"cpu\", verbose=False): self.device = device self.max_epochs = 10 self.verbose", "inp, out): rep = out def fit_all(self): pass def _fit_probe(self, model, probe, train_loader,", "= 10 self.verbose = verbose rep = None def _hook(model, inp, out): rep", "targets = inputs.to(device), targets.to(device) _ = model(inputs) outputs = probe(rep) loss = criterion(outputs,", "optimizer, criterion): for epoch in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss = 0", "def _fit_probe(self, model, probe, train_loader, optimizer, criterion): for epoch in range(self.max_epochs): # acc", "model, probe, train_loader, optimizer, criterion): for epoch in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device)", "{total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer, epochs=10, 
device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam)", "= criterion(outputs, targets) # acc(outputs, targets) # total_loss += loss.item() loss.backward() optimizer.step() if", "loss / acc probe.eval() acc = torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for", "probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer =", "module._forward_hooks.clear() handle = layer.register_forward_hook(_hook) # dequeue trainloader through model and train probe self._", "targets.to(self.device) with torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs,", "in enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ = model(inputs) optimizer.zero_grad()", "= None def _hook(model, inp, out): rep = out def fit_all(self): pass def", "None def _hook(model, inp, out): rep = out def fit_all(self): pass def _fit_probe(self,", "float: global rep model.eval() # def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax()", "probe, train_loader, optimizer, criterion): for epoch in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss", "train-loss: {total_loss / len(train_loader)}\") def fit_layer(model, trainloader, valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(),", "def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer", "probe(rep) loss = criterion(outputs, targets) acc(outputs, targets) total_loss += loss.item() handle.remove() return total_loss", ").to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register hook on layer", "# acc(outputs, targets) # total_loss += 
loss.item() loss.backward() optimizer.step() if self.verbose: print(f\"epoch: {epoch}/{self.max_epochs}", "torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(valloader): inputs,", "torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion = criterion.to(device) optimizer = optimizer(probe.parameters(), lr=1e-4) # register", "batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _", "def fit_layer(model, trainloader, valloader, classes, layer, epochs=10, device=-1, criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam) -> float: global", "acc = torchmetrics.Accuracy().to(device) total_loss = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in", "= inputs.to(self.device), targets.to(self.device) with torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs = probe(rep) loss", "for epoch in range(self.max_epochs): # acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx,", "with torch.no_grad(): _ = model(inputs) optimizer.zero_grad() outputs = probe(rep) loss = criterion(outputs, targets)", "0 for batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets = inputs.to(self.device), targets.to(self.device) with", "def fit_all(self): pass def _fit_probe(self, model, probe, train_loader, optimizer, criterion): for epoch in", "torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets =", "_fit_probe(self, model, probe, train_loader, optimizer, criterion): for epoch in range(self.max_epochs): # acc =", "= torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs, targets) in enumerate(train_loader): inputs, targets", "self.device = device self.max_epochs = 10 self.verbose = verbose rep = None def", "epoch in range(self.max_epochs): 
# acc = torchmetrics.Accuracy().to(self.device) total_loss = 0 for batch_idx, (inputs,", "model.eval() # def probe probe = torch.nn.Sequential( torch.nn.Flatten(), torch.nn.LazyLinear(classes), torch.nn.Softmax() ).to(device) criterion =", "outputs = probe(rep) loss = criterion(outputs, targets) # acc(outputs, targets) # total_loss +=" ]
[ "with an array of characters but instead of returning the result (j), line", "pretty face doing?\" treeCTF{110_105_99_101} Actually though, the code may look fancy, but all", "to return the answer # all x is is ['i', 'm', 'a', 'e',", "(j), line 7 is return; j[:why:-1] Note the semicolon which acts as a", "the spaces gives the answer ['n', 'i', 'c', 'e'] Also, when we define", "of returning the result (j), line 7 is return; j[:why:-1] Note the semicolon", "x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon to return the answer #", "with the code. What's that pretty face doing?\" treeCTF{110_105_99_101} Actually though, the code", "line 7 is return; j[:why:-1] Note the semicolon which acts as a linebreak,", "is return; j[:why:-1] Note the semicolon which acts as a linebreak, so j[:why:-1]", "semicolon to return the answer # all x is is ['i', 'm', 'a',", "'a', 'e', 'f', 'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this", "all the spaces gives the answer ['n', 'i', 'c', 'e'] Also, when we", "'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just calls the function", "face doing?\" treeCTF{110_105_99_101} Actually though, the code may look fancy, but all it's", "doing is doing some random stuff with an array of characters but instead", "Actually though, the code may look fancy, but all it's doing is doing", "look fancy, but all it's doing is doing some random stuff with an", "for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon to return", "i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon to return the", "but instead of returning the result (j), line 7 is 
return; j[:why:-1] Note", "Note the semicolon which acts as a linebreak, so j[:why:-1] is never executed", "'i', 'c', 'e'] Also, when we define x, we misspelled 'eval' x=evl(...) should", "that pretty face doing?\" treeCTF{110_105_99_101} Actually though, the code may look fancy, but", "the result (j), line 7 is return; j[:why:-1] Note the semicolon which acts", "characters but instead of returning the result (j), line 7 is return; j[:why:-1]", "which acts as a linebreak, so j[:why:-1] is never executed Removing all the", "though, the code may look fancy, but all it's doing is doing some", "executed Removing all the spaces gives the answer ['n', 'i', 'c', 'e'] Also,", "it's doing is doing some random stuff with an array of characters but", "pretty code!!! \"description\": \"There are two characters wrong with the code. What's that", "['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c'];", "fancy, but all it's doing is doing some random stuff with an array", "so j[:why:-1] is never executed Removing all the spaces gives the answer ['n',", "7 is return; j[:why:-1] Note the semicolon which acts as a linebreak, so", "wrong with the code. What's that pretty face doing?\" treeCTF{110_105_99_101} Actually though, the", "\"\"\" woo pretty code!!! \"description\": \"There are two characters wrong with the code.", "when we define x, we misspelled 'eval' x=evl(...) should be eval(...) \"\"\" def", "x, we misspelled 'eval' x=evl(...) should be eval(...) \"\"\" def sup(x,why): j=[] for", "misspelled 'eval' x=evl(...) should be eval(...) \"\"\" def sup(x,why): j=[] for i in", "code may look fancy, but all it's doing is doing some random stuff", "code!!! \"description\": \"There are two characters wrong with the code. 
What's that pretty", "['n', 'i', 'c', 'e'] Also, when we define x, we misspelled 'eval' x=evl(...)", "woo pretty code!!! \"description\": \"There are two characters wrong with the code. What's", "two characters wrong with the code. What's that pretty face doing?\" treeCTF{110_105_99_101} Actually", "<reponame>wanqizhu/treectf<gh_stars>1-10 \"\"\" woo pretty code!!! \"description\": \"There are two characters wrong with the", "spaces gives the answer ['n', 'i', 'c', 'e'] Also, when we define x,", "Also, when we define x, we misspelled 'eval' x=evl(...) should be eval(...) \"\"\"", "we misspelled 'eval' x=evl(...) should be eval(...) \"\"\" def sup(x,why): j=[] for i", "the semicolon which acts as a linebreak, so j[:why:-1] is never executed Removing", "return the answer # all x is is ['i', 'm', 'a', 'e', 'f',", "stuff with an array of characters but instead of returning the result (j),", "'c', 'e'] Also, when we define x, we misspelled 'eval' x=evl(...) should be", "def sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the", "define x, we misspelled 'eval' x=evl(...) should be eval(...) 
\"\"\" def sup(x,why): j=[]", "array of characters but instead of returning the result (j), line 7 is", "is ['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck", "# remove the semicolon to return the answer # all x is is", "'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just calls the", "j[:why:-1] # remove the semicolon to return the answer # all x is", "j[:why:-1] is never executed Removing all the spaces gives the answer ['n', 'i',", "doing?\" treeCTF{110_105_99_101} Actually though, the code may look fancy, but all it's doing", "is doing some random stuff with an array of characters but instead of", "in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon to return the answer", "the answer # all x is is ['i', 'm', 'a', 'e', 'f', 'n',", "# all x is is ['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c']", "j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon to", "x=evl(...) should be eval(...) \"\"\" def sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)])", "sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon", "run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just calls the function sup(x, 3) sup(x,3)", "linebreak, so j[:why:-1] is never executed Removing all the spaces gives the answer", "return; j[:why:-1] # remove the semicolon to return the answer # all x", "are two characters wrong with the code. 
What's that pretty face doing?\" treeCTF{110_105_99_101}", "'f', 'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just calls", "the semicolon to return the answer # all x is is ['i', 'm',", "acts as a linebreak, so j[:why:-1] is never executed Removing all the spaces", "code. What's that pretty face doing?\" treeCTF{110_105_99_101} Actually though, the code may look", "\"\"\" def sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove", "returning the result (j), line 7 is return; j[:why:-1] Note the semicolon which", "result (j), line 7 is return; j[:why:-1] Note the semicolon which acts as", "Removing all the spaces gives the answer ['n', 'i', 'c', 'e'] Also, when", "random stuff with an array of characters but instead of returning the result", "the code may look fancy, but all it's doing is doing some random", "'e'] Also, when we define x, we misspelled 'eval' x=evl(...) 
should be eval(...)", "doing some random stuff with an array of characters but instead of returning", "'e', 'f', 'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just", "semicolon which acts as a linebreak, so j[:why:-1] is never executed Removing all", "# run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just calls the function sup(x, 3)", "is never executed Removing all the spaces gives the answer ['n', 'i', 'c',", "x is is ['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c'] # run", "remove the semicolon to return the answer # all x is is ['i',", "gives the answer ['n', 'i', 'c', 'e'] Also, when we define x, we", "as a linebreak, so j[:why:-1] is never executed Removing all the spaces gives", "treeCTF{110_105_99_101} Actually though, the code may look fancy, but all it's doing is", "answer ['n', 'i', 'c', 'e'] Also, when we define x, we misspelled 'eval'", "\"description\": \"There are two characters wrong with the code. What's that pretty face", "is is ['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c'] # run through", "'m', 'a', 'e', 'f', 'n', 'e', 'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; #", "instead of returning the result (j), line 7 is return; j[:why:-1] Note the", "may look fancy, but all it's doing is doing some random stuff with", "should be eval(...) 
\"\"\" def sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return;", "an array of characters but instead of returning the result (j), line 7", "of characters but instead of returning the result (j), line 7 is return;", "a linebreak, so j[:why:-1] is never executed Removing all the spaces gives the", "'c'] # run through github.com/wanqizhu/pyfuck x=['i','m','an'[0],\"\"\"emotional\"\"\"[0],'friend'[0],\"\"\"hellotherehowareyoudoingthisisanicefoxfenn\"\"\"[-1],'e','c']; # this just calls the function sup(x,", "eval(...) \"\"\" def sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] #", "never executed Removing all the spaces gives the answer ['n', 'i', 'c', 'e']", "'eval' x=evl(...) should be eval(...) \"\"\" def sup(x,why): j=[] for i in x:", "all it's doing is doing some random stuff with an array of characters", "return; j[:why:-1] Note the semicolon which acts as a linebreak, so j[:why:-1] is", "j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1] # remove the semicolon to return the answer # all", "answer # all x is is ['i', 'm', 'a', 'e', 'f', 'n', 'e',", "\"There are two characters wrong with the code. What's that pretty face doing?\"", "characters wrong with the code. What's that pretty face doing?\" treeCTF{110_105_99_101} Actually though,", "but all it's doing is doing some random stuff with an array of", "some random stuff with an array of characters but instead of returning the", "all x is is ['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c'] #", "j[:why:-1] Note the semicolon which acts as a linebreak, so j[:why:-1] is never", "be eval(...) \"\"\" def sup(x,why): j=[] for i in x: j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)]) return; j[:why:-1]", "What's that pretty face doing?\" treeCTF{110_105_99_101} Actually though, the code may look fancy,", "we define x, we misspelled 'eval' x=evl(...) should be eval(...) \"\"\" def sup(x,why):", "the code. 
What's that pretty face doing?\" treeCTF{110_105_99_101} Actually though, the code may", "the answer ['n', 'i', 'c', 'e'] Also, when we define x, we misspelled" ]
[ "import db_session class Utility(object): def save(self): \"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit()", "update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key in keys: exec(\"entity.{0} = kwargs['{0}']\".format(key)) return", "db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys", "\"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\"", "objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key in keys:", "deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key in", "db_session class Utility(object): def save(self): \"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit() def", "db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs):", "**kwargs): keys = kwargs.keys() for key in keys: exec(\"entity.{0} = kwargs['{0}']\".format(key)) return entity", "for saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self)", "from helpers.database import db_session class Utility(object): def save(self): \"\"\"Function for saving new objects\"\"\"", "saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit()", "delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys()", "objects\"\"\" 
db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity,", "for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key", "db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key in keys: exec(\"entity.{0} =", "<reponame>kwanj-k/flask_sm<filename>utilities/utility.py from helpers.database import db_session class Utility(object): def save(self): \"\"\"Function for saving new", "def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys =", "save(self): \"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting", "\"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for", "db_session.delete(self) db_session.commit() def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key in keys: exec(\"entity.{0}", "new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for deleting objects\"\"\" db_session.delete(self) db_session.commit() def", "def update_entity_fields(entity, **kwargs): keys = kwargs.keys() for key in keys: exec(\"entity.{0} = kwargs['{0}']\".format(key))", "helpers.database import db_session class Utility(object): def save(self): \"\"\"Function for saving new objects\"\"\" db_session.add(self)", "def save(self): \"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function for", "class Utility(object): def save(self): \"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self):", "Utility(object): def save(self): 
\"\"\"Function for saving new objects\"\"\" db_session.add(self) db_session.commit() def delete(self): \"\"\"Function" ]